-
Notifications
You must be signed in to change notification settings - Fork 78
/
Copy pathmarginalization_factor.cpp
421 lines (369 loc) · 18.8 KB
/
marginalization_factor.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
/*******************************************************
* Copyright (C) 2019, Aerial Robotics Group, Hong Kong University of Science and Technology
*
* This file is part of VINS.
*
* Licensed under the GNU General Public License v3.0;
* you may not use this file except in compliance with the License.
*******************************************************/
#include "marginalization_factor.h"
// Evaluate this residual block at the current parameter values: fills
// `residuals` and one jacobian matrix per parameter block by calling the
// wrapped Ceres cost function, then — if a robust loss is attached — rescales
// residuals and jacobians in place (the same correction Ceres applies
// internally, see ceres::internal::Corrector).
void ResidualBlockInfo::Evaluate()
{
    residuals.resize(cost_function->num_residuals());

    std::vector<int> block_sizes = cost_function->parameter_block_sizes();
    // raw_jacobians is the array of raw pointers Ceres writes through; each
    // entry aliases the storage of the corresponding Eigen matrix in
    // `jacobians`, so no copy is needed afterwards. Note: ownership of this
    // array is released in ~MarginalizationInfo.
    raw_jacobians = new double *[block_sizes.size()];
    jacobians.resize(block_sizes.size());

    for (int i = 0; i < static_cast<int>(block_sizes.size()); i++)
    {
        // One (num_residuals x global_size) jacobian per parameter block.
        jacobians[i].resize(cost_function->num_residuals(), block_sizes[i]);
        raw_jacobians[i] = jacobians[i].data();
        //dim += block_sizes[i] == 7 ? 6 : block_sizes[i];
    }
    cost_function->Evaluate(parameter_blocks.data(), residuals.data(), raw_jacobians);

    //std::vector<int> tmp_idx(block_sizes.size());
    //Eigen::MatrixXd tmp(dim, dim);
    //for (int i = 0; i < static_cast<int>(parameter_blocks.size()); i++)
    //{
    //    int size_i = localSize(block_sizes[i]);
    //    Eigen::MatrixXd jacobian_i = jacobians[i].leftCols(size_i);
    //    for (int j = 0, sub_idx = 0; j < static_cast<int>(parameter_blocks.size()); sub_idx += block_sizes[j] == 7 ? 6 : block_sizes[j], j++)
    //    {
    //        int size_j = localSize(block_sizes[j]);
    //        Eigen::MatrixXd jacobian_j = jacobians[j].leftCols(size_j);
    //        tmp_idx[j] = sub_idx;
    //        tmp.block(tmp_idx[i], tmp_idx[j], size_i, size_j) = jacobian_i.transpose() * jacobian_j;
    //    }
    //}
    //Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes(tmp);
    //std::cout << saes.eigenvalues() << std::endl;
    //ROS_ASSERT(saes.eigenvalues().minCoeff() >= -1e-6);

    if (loss_function)
    {
        double residual_scaling_, alpha_sq_norm_;

        double sq_norm, rho[3];

        sq_norm = residuals.squaredNorm();
        // rho[0] = loss(s), rho[1] = loss'(s), rho[2] = loss''(s) at s = sq_norm.
        loss_function->Evaluate(sq_norm, rho);
        //printf("sq_norm: %f, rho[0]: %f, rho[1]: %f, rho[2]: %f\n", sq_norm, rho[0], rho[1], rho[2]);

        double sqrt_rho1_ = sqrt(rho[1]);

        if ((sq_norm == 0.0) || (rho[2] <= 0.0))
        {
            // Degenerate / outlier-region case: a pure rescaling by sqrt(rho')
            // is sufficient (no second-order correction).
            residual_scaling_ = sqrt_rho1_;
            alpha_sq_norm_ = 0.0;
        }
        else
        {
            // Solve the quadratic for alpha as in Triggs et al. / Ceres'
            // Corrector so that the Gauss-Newton step of the corrected
            // problem matches the robustified cost to second order.
            const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];
            const double alpha = 1.0 - sqrt(D);
            residual_scaling_ = sqrt_rho1_ / (1 - alpha);
            alpha_sq_norm_ = alpha / sq_norm;
        }

        for (int i = 0; i < static_cast<int>(parameter_blocks.size()); i++)
        {
            // J <- sqrt(rho') * (J - (alpha / s) * r * (r^T * J))
            jacobians[i] = sqrt_rho1_ * (jacobians[i] - alpha_sq_norm_ * residuals * (residuals.transpose() * jacobians[i]));
        }

        residuals *= residual_scaling_;
    }
}
// Release everything this object owns: the snapshotted parameter data
// (allocated in preMarginalize) and every registered residual block together
// with its cost function and raw-jacobian pointer array.
MarginalizationInfo::~MarginalizationInfo()
{
    //ROS_WARN("release marginlizationinfo");

    // parameter_block_data values were allocated with `new double[size]`
    // (see preMarginalize), so they must be freed with delete[] — the
    // original scalar `delete` here was undefined behavior.
    for (auto it = parameter_block_data.begin(); it != parameter_block_data.end(); ++it)
        delete[] it->second;

    for (int i = 0; i < (int)factors.size(); i++)
    {
        delete[] factors[i]->raw_jacobians; // array of jacobian pointers (new[] in Evaluate)
        delete factors[i]->cost_function;   // ownership of the Ceres cost function was transferred in
        delete factors[i];                  // the ResidualBlockInfo itself
    }
}
// Adds the given residual_block_info to this MarginalizationInfo.
// For each loss function this records its parameter blocks in parameter_block_size
// and the blocks to be marginalized in parameter_block_idx. Note that after this
// step parameter_block_idx only contains keys (the memory addresses of the blocks
// to marginalize), and all of their values are still 0.
// Register one residual block with the marginalization bookkeeping.
// Keyed by the raw address of each parameter block, this records:
//  - parameter_block_size: the (global) size of every block this factor touches;
//  - parameter_block_idx: a placeholder entry (value 0, filled in later by
//    marginalize()) for every block listed in the factor's drop_set, i.e.
//    every block scheduled to be marginalized out.
void MarginalizationInfo::addResidualBlockInfo(ResidualBlockInfo *residual_block_info)
{
    factors.emplace_back(residual_block_info);

    std::vector<double *> &blocks = residual_block_info->parameter_blocks;
    std::vector<int> sizes = residual_block_info->cost_function->parameter_block_sizes();

    // Every block touched by this factor: remember its length (an address
    // alone is not enough to interpret the data later).
    for (int i = 0; i < static_cast<int>(blocks.size()); i++)
    {
        long key = reinterpret_cast<long>(blocks[i]);
        parameter_block_size[key] = sizes[i];
    }

    // Blocks to be dropped: create their index entries, value 0 for now.
    for (int k = 0; k < static_cast<int>(residual_block_info->drop_set.size()); k++)
    {
        double *dropped = blocks[residual_block_info->drop_set[k]];
        parameter_block_idx[reinterpret_cast<long>(dropped)] = 0;
    }
}
// After addResidualBlockInfo() has established which blocks exist, their sizes,
// and which ones will be marginalized, preMarginalize() does the preprocessing:
// it evaluates every factor and snapshots the current parameter values.
// Evaluate every registered factor (filling residuals and jacobians via the
// virtual Evaluate() of each cost function) and copy the current value of
// every parameter block into freshly allocated storage, so the linearization
// point is preserved even after the optimizer later updates the originals.
void MarginalizationInfo::preMarginalize()
{
    for (auto it : factors) // factors were collected by addResidualBlockInfo
    {
        // Polymorphic dispatch: each concrete cost function computes its own
        // residual and jacobians at the current state.
        it->Evaluate();

        std::vector<int> block_sizes = it->cost_function->parameter_block_sizes();
        for (int i = 0; i < static_cast<int>(block_sizes.size()); i++)
        {
            long addr = reinterpret_cast<long>(it->parameter_blocks[i]);
            int size = block_sizes[i];

            // Snapshot each block exactly once, keyed by its original address.
            if (parameter_block_data.find(addr) != parameter_block_data.end())
                continue;

            double *copy = new double[size];
            memcpy(copy, it->parameter_blocks[i], sizeof(double) * size);
            parameter_block_data[addr] = copy;
        }
    }
}
// Tangent-space (local) dimension of a parameter block: a 7-D pose
// (translation + unit quaternion) has 6 degrees of freedom; every other
// block's local size equals its global size.
int MarginalizationInfo::localSize(int size) const
{
    if (size == 7)
        return 6;
    return size;
}
// Inverse of localSize(): maps a 6-DoF local pose dimension back to the 7-D
// global parameterization; all other sizes pass through unchanged.
int MarginalizationInfo::globalSize(int size) const
{
    if (size == 6)
        return 7;
    return size;
}
void* ThreadsConstructA(void* threadsstruct)
{
ThreadsStruct* p = ((ThreadsStruct*)threadsstruct);
for (auto it : p->sub_factors)
{
for (int i = 0; i < static_cast<int>(it->parameter_blocks.size()); i++)
{
int idx_i = p->parameter_block_idx[reinterpret_cast<long>(it->parameter_blocks[i])];
int size_i = p->parameter_block_size[reinterpret_cast<long>(it->parameter_blocks[i])];
if (size_i == 7)
size_i = 6;
Eigen::MatrixXd jacobian_i = it->jacobians[i].leftCols(size_i);
for (int j = i; j < static_cast<int>(it->parameter_blocks.size()); j++)
{
int idx_j = p->parameter_block_idx[reinterpret_cast<long>(it->parameter_blocks[j])];
int size_j = p->parameter_block_size[reinterpret_cast<long>(it->parameter_blocks[j])];
if (size_j == 7)
size_j = 6;
Eigen::MatrixXd jacobian_j = it->jacobians[j].leftCols(size_j);
if (i == j)
p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j;
else
{
p->A.block(idx_i, idx_j, size_i, size_j) += jacobian_i.transpose() * jacobian_j;
p->A.block(idx_j, idx_i, size_j, size_i) = p->A.block(idx_i, idx_j, size_i, size_j).transpose();
}
}
p->b.segment(idx_i, size_i) += jacobian_i.transpose() * it->residuals;
}
}
return threadsstruct;
}
// Performs the actual marginalization.
// Marginalize the dropped parameter blocks via the Schur complement.
//
// 1) Finish the ordering started by addResidualBlockInfo(): blocks already in
//    parameter_block_idx (the ones to marginalize) get the leading indices, so
//    they land in the top-left of the information matrix; the kept blocks are
//    appended after them. m = summed local dimension of marginalized blocks,
//    n = summed local dimension of kept blocks.
// 2) Build A = J^T J and b = J^T r over all factors, in parallel.
// 3) Schur-complement away the top-left m x m block.
// 4) Factor the reduced system back into a jacobian/residual pair that acts
//    as the prior factor in the next optimization round.
void MarginalizationInfo::marginalize()
{
    int pos = 0; // running total of local dimensions
    for (auto &it : parameter_block_idx) // blocks flagged for marginalization
    {
        it.second = pos;
        pos += localSize(parameter_block_size[it.first]);
    }

    m = pos; // total local dimension of the blocks to marginalize

    for (const auto &it : parameter_block_size)
    {
        if (parameter_block_idx.find(it.first) == parameter_block_idx.end()) // a kept block
        {
            parameter_block_idx[it.first] = pos; // append it after the marginalized ones
            pos += localSize(it.second);
        }
    }

    n = pos - m; // total local dimension of the kept blocks
    //ROS_INFO("marginalization, pos: %d, m: %d, n: %d, size: %d", pos, m, n, (int)parameter_block_idx.size());
    if (m == 0)
    {
        valid = false;
        printf("unstable tracking...\n");
        return;
    }

    TicToc t_summing;
    Eigen::MatrixXd A(pos, pos); // full information matrix (fixed stray ';;')
    Eigen::VectorXd b(pos);
    A.setZero();
    b.setZero();

    // Build A and b with NUM_THREADS workers; the ordering above guarantees
    // the marginalized variables occupy the top-left corner of A.
    TicToc t_thread_summing;
    pthread_t tids[NUM_THREADS];
    ThreadsStruct threadsstruct[NUM_THREADS];
    int i = 0;
    for (auto it : factors) // round-robin the residual blocks over the threads
    {
        threadsstruct[i].sub_factors.push_back(it);
        i++;
        i = i % NUM_THREADS;
    }
    for (int i = 0; i < NUM_THREADS; i++)
    {
        TicToc zero_matrix;
        threadsstruct[i].A = Eigen::MatrixXd::Zero(pos, pos);
        threadsstruct[i].b = Eigen::VectorXd::Zero(pos);
        threadsstruct[i].parameter_block_size = parameter_block_size;
        threadsstruct[i].parameter_block_idx = parameter_block_idx;
        int ret = pthread_create(&tids[i], NULL, ThreadsConstructA, (void *)&(threadsstruct[i]));
        if (ret != 0) // (removed an accidentally duplicated `if (ret != 0)` here)
        {
            ROS_WARN("pthread_create error");
            ROS_BREAK();
        }
    }
    for (int i = NUM_THREADS - 1; i >= 0; i--)
    {
        pthread_join(tids[i], NULL);
        A += threadsstruct[i].A;
        b += threadsstruct[i].b;
    }
    //ROS_DEBUG("thread summing up costs %f ms", t_thread_summing.toc());

    // Schur complement. Symmetrize Amm first for numerical safety, then form
    // its pseudo-inverse through an eigendecomposition, zeroing eigenvalues
    // below eps (the matrix may be rank-deficient).
    Eigen::MatrixXd Amm = 0.5 * (A.block(0, 0, m, m) + A.block(0, 0, m, m).transpose());
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes(Amm);
    //ROS_ASSERT_MSG(saes.eigenvalues().minCoeff() >= -1e-4, "min eigenvalue %f", saes.eigenvalues().minCoeff());
    Eigen::MatrixXd Amm_inv = saes.eigenvectors() * Eigen::VectorXd((saes.eigenvalues().array() > eps).select(saes.eigenvalues().array().inverse(), 0)).asDiagonal() * saes.eigenvectors().transpose();

    Eigen::VectorXd bmm = b.segment(0, m);
    Eigen::MatrixXd Amr = A.block(0, m, m, n);
    Eigen::MatrixXd Arm = A.block(m, 0, n, m);
    Eigen::MatrixXd Arr = A.block(m, m, n, n);
    Eigen::VectorXd brr = b.segment(m, n);
    A = Arr - Arm * Amm_inv * Amr; // reduced (n x n) information matrix
    b = brr - Arm * Amm_inv * bmm; // reduced information vector

    // Recover a square-root factorization of the reduced system so that
    // linearized_jacobians^T * linearized_jacobians == A and
    // linearized_jacobians^T * linearized_residuals == b. These become the
    // prior residual/jacobian fed into the next round's MarginalizationFactor.
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes2(A);
    Eigen::VectorXd S = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array(), 0));
    Eigen::VectorXd S_inv = Eigen::VectorXd((saes2.eigenvalues().array() > eps).select(saes2.eigenvalues().array().inverse(), 0));

    Eigen::VectorXd S_sqrt = S.cwiseSqrt();
    Eigen::VectorXd S_inv_sqrt = S_inv.cwiseSqrt();

    linearized_jacobians = S_sqrt.asDiagonal() * saes2.eigenvectors().transpose();
    linearized_residuals = S_inv_sqrt.asDiagonal() * saes2.eigenvectors().transpose() * b;
}
// Collect the bookkeeping for the parameter blocks that survive
// marginalization (ordering index >= m) and return their NEW addresses,
// looked up through addr_shift (old address -> address after the sliding
// window shifts). Also records each kept block's size, index, and snapshot
// data for use by the next MarginalizationFactor.
std::vector<double *> MarginalizationInfo::getParameterBlocks(std::unordered_map<long, double *> &addr_shift)
{
    std::vector<double *> keep_block_addr;
    keep_block_size.clear();
    keep_block_idx.clear();
    keep_block_data.clear();

    for (const auto &it : parameter_block_idx)
    {
        if (it.second < m)
            continue; // marginalized block: not carried forward

        long addr = it.first;
        keep_block_size.push_back(parameter_block_size[addr]);
        keep_block_idx.push_back(parameter_block_idx[addr]);
        keep_block_data.push_back(parameter_block_data[addr]);
        keep_block_addr.push_back(addr_shift[addr]);
    }
    sum_block_size = std::accumulate(std::begin(keep_block_size), std::end(keep_block_size), 0);

    return keep_block_addr;
}
// Cost function for the prior residual.
// Prior factor built from a finished marginalization. Its Ceres parameter
// block sizes are the kept blocks' global sizes, and its residual dimension
// is n, the kept blocks' total local dimension. (Also removed the stray ';'
// that followed the function body.)
MarginalizationFactor::MarginalizationFactor(MarginalizationInfo* _marginalization_info) : marginalization_info(_marginalization_info)
{
    int cnt = 0;
    for (auto it : marginalization_info->keep_block_size)
    {
        mutable_parameter_block_sizes()->push_back(it);
        cnt += it; // total global size, kept only for the debug print below
    }
    //printf("residual size: %d, %d\n", cnt, n);
    set_num_residuals(marginalization_info->n);
}
/* Evaluate computes the residual and jacobians for all state variables.
Given the current optimization variable values `parameters` and the prior
(for the prior residual this is last_marginalization_info from the previous
step; for the IMU it is the preintegration pre_integrations[1]; for vision it
is the pixel coordinates pts_i, pts_j of the spatial points), this computes
the residual vector `residuals` and the jacobian of the residual w.r.t. each
optimization variable in `jacobians`.
Reference: https://blog.csdn.net/weixin_44580210/article/details/95748091 */
// Ceres entry point for the prior factor:
//   residual = linearized_residuals + linearized_jacobians * dx,
// where dx is the local-parameterization difference between the current
// values and the linearization point stored in keep_block_data. Pose blocks
// (global size 7) use translation difference plus the quaternion
// tangent-space error for rotation; all other blocks use plain subtraction.
bool MarginalizationFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const
{
    //printf("internal addr,%d, %d\n", (int)parameter_block_sizes().size(), num_residuals());
    //for (int i = 0; i < static_cast<int>(keep_block_size.size()); i++)
    //{
    //    //printf("unsigned %x\n", reinterpret_cast<unsigned long>(parameters[i]));
    //    //printf("signed %x\n", reinterpret_cast<long>(parameters[i]));
    //printf("jacobian %x\n", reinterpret_cast<long>(jacobians));
    //printf("residual %x\n", reinterpret_cast<long>(residuals));
    //}
    int n = marginalization_info->n; // residual dimension (sum of kept local sizes)
    int m = marginalization_info->m; // offset of kept blocks in the pre-marg ordering
    Eigen::VectorXd dx(n);
    for (int i = 0; i < static_cast<int>(marginalization_info->keep_block_size.size()); i++)
    {
        int size = marginalization_info->keep_block_size[i];
        // keep_block_idx was assigned in the full ordering; subtracting m
        // gives this block's position within dx.
        int idx = marginalization_info->keep_block_idx[i] - m;
        Eigen::VectorXd x = Eigen::Map<const Eigen::VectorXd>(parameters[i], size);
        // x0: the snapshotted linearization point for this block.
        Eigen::VectorXd x0 = Eigen::Map<const Eigen::VectorXd>(marginalization_info->keep_block_data[i], size);
        if (size != 7)
            dx.segment(idx, size) = x - x0;
        else
        {
            // Pose block laid out as [tx ty tz qx qy qz qw]:
            // translation part is a plain difference; rotation part is
            // 2 * vec(q0^{-1} * q), the small-angle rotation error.
            dx.segment<3>(idx + 0) = x.head<3>() - x0.head<3>();
            dx.segment<3>(idx + 3) = 2.0 * Utility::positify(Eigen::Quaterniond(x0(6), x0(3), x0(4), x0(5)).inverse() * Eigen::Quaterniond(x(6), x(3), x(4), x(5))).vec();
            // If the relative quaternion has negative w, negate the vector
            // part so the error stays in the hemisphere nearest identity.
            if (!((Eigen::Quaterniond(x0(6), x0(3), x0(4), x0(5)).inverse() * Eigen::Quaterniond(x(6), x(3), x(4), x(5))).w() >= 0))
            {
                dx.segment<3>(idx + 3) = 2.0 * -Utility::positify(Eigen::Quaterniond(x0(6), x0(3), x0(4), x0(5)).inverse() * Eigen::Quaterniond(x(6), x(3), x(4), x(5))).vec();
            }
        }
    }
    // First-order propagation of the prior residual to the current state.
    Eigen::Map<Eigen::VectorXd>(residuals, n) = marginalization_info->linearized_residuals + marginalization_info->linearized_jacobians * dx;
    if (jacobians)
    {
        for (int i = 0; i < static_cast<int>(marginalization_info->keep_block_size.size()); i++)
        {
            if (jacobians[i])
            {
                // The prior is linear in dx, so the jacobian w.r.t. each kept
                // block is constant: the corresponding columns of
                // linearized_jacobians, zero-padded from local to global size.
                int size = marginalization_info->keep_block_size[i], local_size = marginalization_info->localSize(size);
                int idx = marginalization_info->keep_block_idx[i] - m;
                Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobian(jacobians[i], n, size);
                jacobian.setZero();
                jacobian.leftCols(local_size) = marginalization_info->linearized_jacobians.middleCols(idx, local_size);
            }
        }
    }
    return true;
}