SLAM 14 Lectures: Designing a SLAM System
Not much to say here; everything is in the comments. My approach is to study the code line by line first, then read it as a whole, and later just review it in large chunks.
Below is the frontend, frontend.cpp.
//
// Created by gaoxiang on 19-5-2.
//
#include <opencv2/opencv.hpp>
#include "myslam/algorithm.h"
#include "myslam/backend.h"
#include "myslam/config.h"
#include "myslam/feature.h"
#include "myslam/frontend.h"
#include "myslam/g2o_types.h"
#include "myslam/map.h"
#include "myslam/viewer.h"
#include <opencv2/imgproc.hpp>
namespace myslam {
Frontend::Frontend() {
gftt_ =
cv::GFTTDetector::create(Config::Get<int>("num_features"), 0.01, 20);  // maxCorners = num_features; qualityLevel = 0.01 (corners whose response is below 0.01 * the strongest response are discarded); minDistance = 20 pixels between corners
num_features_init_ = Config::Get<int>("num_features_init");
num_features_ = Config::Get<int>("num_features");
}
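// For reference (my own note; the numbers are illustrative, not necessarily the project's defaults):
// Config::Get reads these parameters from a YAML configuration file, so the matching entries
// might look like
//   num_features: 150
//   num_features_init: 50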
// Add a new frame for processing. This is the top-level dispatch: what to do while initializing, while tracking (good or bad), and when tracking is lost.
bool Frontend::AddFrame(myslam::Frame::Ptr frame) {
current_frame_ = frame;
switch (status_) {
case FrontendStatus::INITING:
StereoInit();
break;
case FrontendStatus::TRACKING_GOOD:
case FrontendStatus::TRACKING_BAD:
Track();
break;
case FrontendStatus::LOST:
Reset();
break;
}
last_frame_ = current_frame_;  // after processing, the current frame becomes the last frame
return true;
}
// Normal tracking after initialization; classifies the result as good / bad / lost
bool Frontend::Track() {
if (last_frame_) {
current_frame_->SetPose(relative_motion_ * last_frame_->Pose());  // constant-velocity model: predict the current pose from the last pose and the previous relative motion
}
int num_track_last = TrackLastFrame();  // track feature points from the last frame into the current one
tracking_inliers_ = EstimateCurrentPose();  // optimize the current pose and count the inliers
if (tracking_inliers_ > num_features_tracking_) {  // more than 50 inliers: tracking is good
// tracking good
status_ = FrontendStatus::TRACKING_GOOD;
} else if (tracking_inliers_ > num_features_tracking_bad_) {  // more than 20 inliers: tracking is bad but still usable
// tracking bad
status_ = FrontendStatus::TRACKING_BAD;
} else {
// lost
status_ = FrontendStatus::LOST;  // otherwise tracking is lost
}
InsertKeyframe();  // if too few inliers remain, insert a keyframe and add new points from the stereo pair
relative_motion_ = current_frame_->Pose() * last_frame_->Pose().inverse();  // current pose times the inverse of the last pose: the relative motion between the two frames
if (viewer_) viewer_->AddCurrentFrame(current_frame_);  // push the (now optimized) current frame to the viewer
return true;
}
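// A note on the pose convention (my own summary): judging from how Pose() is used in this file
// (e.g. current_pose_Twc = Pose().inverse() below), Frame::Pose() stores T_cw, the transform
// from world to camera. Under that convention,
//   relative_motion_ = T_cw(curr) * T_cw(last)^{-1},
// i.e. the motion from the last camera frame to the current one, and the constant-velocity
// prediction at the top of Track() is simply
//   T_cw(curr) ≈ relative_motion_ * T_cw(last).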
// The above is the overall framework; the individual steps are implemented below.
// Insert a keyframe (done when the current frame tracks too few points)
bool Frontend::InsertKeyframe() {
if (tracking_inliers_ >= num_features_needed_for_keyframe_) {  // still at least 80 inliers
// still have enough features, don't insert keyframe
return false;
}
// current frame is a new keyframe
current_frame_->SetKeyFrame();  // mark the current frame as a keyframe
map_->InsertKeyFrame(current_frame_);  // insert it into the map
LOG(INFO) << "Set frame " << current_frame_->id_ << " as keyframe "  // log the frame id and keyframe id
<< current_frame_->keyframe_id_;
SetObservationsForKeyFrame();
DetectFeatures();  // detect new features in the left image
// track the new features in the right image
FindFeaturesInRight();
// triangulate the new matches to recover depth and create map points
TriangulateNewPoints();
// update the backend because we have a new keyframe
backend_->UpdateMap();
if (viewer_) viewer_->UpdateMap();  // refresh the viewer with the updated map
return true;
}
// Register the current keyframe's features as observations of their map points; these observations are used later in the optimization.
void Frontend::SetObservationsForKeyFrame() {
for (auto &feat : current_frame_->features_left_) {
auto mp = feat->map_point_.lock();
if (mp) mp->AddObservation(feat);  // add this feature as an observation of its map point
}
}
// Triangulate new map points for the current keyframe
int Frontend::TriangulateNewPoints() {
std::vector<SE3> poses{camera_left_->pose(), camera_right_->pose()};
SE3 current_pose_Twc = current_frame_->Pose().inverse();  // T_wc: transform from the current camera to the world (inverse of the frame pose T_cw)
int cnt_triangulated_pts = 0;
for (size_t i = 0; i < current_frame_->features_left_.size(); ++i) {
if (current_frame_->features_left_[i]->map_point_.expired() &&
current_frame_->features_right_[i] != nullptr) {
// the left feature has no associated map point and a matching right feature exists: try to triangulate
std::vector<Vec3> points{
camera_left_->pixel2camera(
Vec2(current_frame_->features_left_[i]->position_.pt.x,
current_frame_->features_left_[i]->position_.pt.y)),
camera_right_->pixel2camera(
Vec2(current_frame_->features_right_[i]->position_.pt.x,
current_frame_->features_right_[i]->position_.pt.y))};
Vec3 pworld = Vec3::Zero();
if (triangulation(poses, points, pworld) && pworld[2] > 0) {  // triangulation succeeded and the depth is positive
auto new_map_point = MapPoint::CreateNewMappoint();
pworld = current_pose_Twc * pworld;  // transform the triangulated point from the camera frame to world coordinates
new_map_point->SetPos(pworld);
new_map_point->AddObservation(
current_frame_->features_left_[i]);  // register the left and right features as observations of the new map point
new_map_point->AddObservation(
current_frame_->features_right_[i]);
current_frame_->features_left_[i]->map_point_ = new_map_point;  // link the left and right features to the new map point
current_frame_->features_right_[i]->map_point_ = new_map_point;
map_->InsertMapPoint(new_map_point);  // insert the new map point into the map
cnt_triangulated_pts++;  // count of successfully triangulated points
}
}
}
LOG(INFO) << "new landmarks: " << cnt_triangulated_pts;//输入新的角点
return cnt_triangulated_pts;
}
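// How triangulation() can recover pworld (a sketch of the standard linear (DLT) formulation,
// which I believe is what the helper in algorithm.h implements): each view i contributes a pose
// T_i (here the left/right camera extrinsics) and a normalized observation x_i = (u_i, v_i, 1).
// The constraint x_i × (T_i * P) = 0 yields two linear rows per view,
//   u_i * T_i.row(2) - T_i.row(0)   and   v_i * T_i.row(2) - T_i.row(1),
// which are stacked into a 4-column matrix A. The homogeneous point P is the right singular
// vector of A with the smallest singular value; the pworld[2] > 0 check above then rejects
// points that end up behind the camera.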
// Estimate the current pose by pose-only graph optimization against the observed map points; this is a local refinement of the predicted T, whereas the backend optimizes over the whole map.
int Frontend::EstimateCurrentPose() {
// setup g2o
typedef g2o::BlockSolver_6_3 BlockSolverType;
typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType>
LinearSolverType;
auto solver = new g2o::OptimizationAlgorithmLevenberg(
g2o::make_unique<BlockSolverType>(
g2o::make_unique<LinearSolverType>()));
g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm(solver);
// vertex
VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose
vertex_pose->setId(0);
vertex_pose->setEstimate(current_frame_->Pose());
optimizer.addVertex(vertex_pose);
// K
Mat33 K = camera_left_->K();
// edges
int index = 1;
std::vector<EdgeProjectionPoseOnly *> edges;
std::vector<Feature::Ptr> features;
for (size_t i = 0; i < current_frame_->features_left_.size(); ++i) {
auto mp = current_frame_->features_left_[i]->map_point_.lock();
if (mp) {
features.push_back(current_frame_->features_left_[i]);
EdgeProjectionPoseOnly *edge =
new EdgeProjectionPoseOnly(mp->pos_, K);
edge->setId(index);
edge->setVertex(0, vertex_pose);
edge->setMeasurement(
toVec2(current_frame_->features_left_[i]->position_.pt));  // measurement: the pixel position observed in the left image
edge->setInformation(Eigen::Matrix2d::Identity());
edge->setRobustKernel(new g2o::RobustKernelHuber);
edges.push_back(edge);
optimizer.addEdge(edge);
index++;
}
}
// estimate the pose and determine the outliers
const double chi2_th = 5.991;  // chi-square threshold used to classify outliers
int cnt_outlier = 0;
for (int iteration = 0; iteration < 4; ++iteration) {
vertex_pose->setEstimate(current_frame_->Pose());
optimizer.initializeOptimization();
optimizer.optimize(10);
cnt_outlier = 0;
// count the outliers, i.e. the edges whose error is too large
for (size_t i = 0; i < edges.size(); ++i) {
auto e = edges[i];
if (features[i]->is_outlier_) {
e->computeError();
}
if (e->chi2() > chi2_th) {
features[i]->is_outlier_ = true;
e->setLevel(1);
cnt_outlier++;
} else {
features[i]->is_outlier_ = false;
e->setLevel(0);
}
if (iteration == 2) {
e->setRobustKernel(nullptr);  // after iteration 2, drop the robust kernel so the remaining inliers are refined with the plain quadratic cost
}
}
}
LOG(INFO) << "Outlier/Inlier in pose estimating: " << cnt_outlier << "/"//输出外点和内点
<< features.size() - cnt_outlier;
// Set pose and outlier
current_frame_->SetPose(vertex_pose->estimate());
LOG(INFO) << "Current Pose = \n" << current_frame_->Pose().matrix();//输出优化后的位姿
for (auto &feat : features) {
if (feat->is_outlier_) {
feat->map_point_.reset();  // detach the outlier feature from its map point
feat->is_outlier_ = false; // maybe we can still use it in future
}
}
return features.size() - cnt_outlier;  // return the number of inliers
}
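// Why chi2_th = 5.991 (my own remark): with an identity information matrix, the squared
// reprojection error e->chi2() of an inlier follows a chi-square distribution with 2 degrees
// of freedom (the measurement is a 2D pixel), and 5.991 is its 95% quantile. Edges above the
// threshold are moved to level 1, which excludes them from the level-0 optimization in the
// following iterations, while the Huber kernel is removed after iteration 2 so the surviving
// inliers are refined with the plain quadratic cost.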
// Use the last frame to predict pixel positions in the current frame, track them by optical flow, and attach the matched features to the current frame.
int Frontend::TrackLastFrame() {
// use LK flow to track points from the last frame into the current frame; map points are projected with the predicted pose to get an initial guess for each pixel position
std::vector<cv::Point2f> kps_last, kps_current;
for (auto &kp : last_frame_->features_left_) {
if (kp->map_point_.lock()) {  // the feature has an associated map point
// use project point
auto mp = kp->map_point_.lock();  // lock the weak_ptr to get the map point
auto px =
camera_left_->world2pixel(mp->pos_, current_frame_->Pose());  // project the world point with the predicted current pose
kps_last.push_back(kp->position_.pt);
kps_current.push_back(cv::Point2f(px[0], px[1]));
} else {
kps_last.push_back(kp->position_.pt);
kps_current.push_back(kp->position_.pt);
}
}
std::vector<uchar> status;
Mat error;
// LK optical flow tracking; successfully tracked points are attached to the current frame below
cv::calcOpticalFlowPyrLK(
last_frame_->left_img_, current_frame_->left_img_, kps_last,
kps_current, status, error, cv::Size(11, 11), 3,  // 11x11 search window, 3 pyramid levels; status[i] is set to 1 if the flow for feature i was found, 0 otherwise
cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
0.01),  // termination criteria: stop after 30 iterations or when the update falls below 0.01
cv::OPTFLOW_USE_INITIAL_FLOW);  // use the values already stored in kps_current as the initial guess for the flow
int num_good_pts = 0;
for (size_t i = 0; i < status.size(); ++i) {
if (status[i]) {  // status 1 means the point was tracked successfully
cv::KeyPoint kp(kps_current[i], 7);  // 7 is the keypoint diameter
Feature::Ptr feature(new Feature(current_frame_, kp));  // create a new feature attached to the current frame
feature->map_point_ = last_frame_->features_left_[i]->map_point_;  // inherit the map point from the corresponding feature in the last frame
current_frame_->features_left_.push_back(feature);  // add the new feature to the current frame
num_good_pts++;  // one more successfully tracked point
}
}
LOG(INFO) << "Find " << num_good_pts << " in the last image.";
return num_good_pts;
}
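// Sketch of the initial guess used above (my own note, assuming the usual pinhole model of the
// book): world2pixel first maps the map point into the current camera frame, p_c = T_cw * p_w,
// and then projects it,
//   u = fx * p_c.x / p_c.z + cx,   v = fy * p_c.y / p_c.z + cy.
// With cv::OPTFLOW_USE_INITIAL_FLOW, calcOpticalFlowPyrLK starts its search at these predicted
// positions stored in kps_current instead of at the positions taken from kps_last.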
// Build the initial map
// Stereo initialization: detect features in the left image, match them in the right image, then triangulate
bool Frontend::StereoInit() {
int num_features_left = DetectFeatures();
int num_coor_features = FindFeaturesInRight();
if (num_coor_features < num_features_init_) {
return false;
}
bool build_map_success = BuildInitMap();  // build the initial map
if (build_map_success) {
status_ = FrontendStatus::TRACKING_GOOD;  // initialization succeeded, switch to normal tracking
if (viewer_) {  // push the first frame and the initial map to the viewer
viewer_->AddCurrentFrame(current_frame_);
viewer_->UpdateMap();
}
return true;
}
return false;
}
// Detect new GFTT features in the left image; regions around existing features are masked out with small filled rectangles
int Frontend::DetectFeatures() {
cv::Mat mask(current_frame_->left_img_.size(), CV_8UC1, 255);
for (auto &feat : current_frame_->features_left_) {
cv::rectangle(mask, feat->position_.pt - cv::Point2f(10, 10),  // two opposite corners of a 20x20 rectangle around each existing feature
feat->position_.pt + cv::Point2f(10, 10), 0, cv::FILLED);  // fill the rectangle with 0 so no new feature is detected there
}
std::vector<cv::KeyPoint> keypoints;
gftt_->detect(current_frame_->left_img_, keypoints, mask);
int cnt_detected = 0;
for (auto &kp : keypoints) {
current_frame_->features_left_.push_back(
Feature::Ptr(new Feature(current_frame_, kp)));  // wrap each newly detected keypoint as a feature of the current frame
cnt_detected++;
}
LOG(INFO) << "Detect " << cnt_detected << " new features";
return cnt_detected;
}
// Find the corresponding features in the right image and compute their pixel positions
int Frontend::FindFeaturesInRight() {
// use LK flow to estimate the corresponding points in the right image
std::vector<cv::Point2f> kps_left, kps_right;
for (auto &kp : current_frame_->features_left_) {
kps_left.push_back(kp->position_.pt);
auto mp = kp->map_point_.lock();
if (mp) {
// use projected points as initial guess
auto px =
camera_right_->world2pixel(mp->pos_, current_frame_->Pose());  // project the world point into the right image as an initial guess
kps_right.push_back(cv::Point2f(px[0], px[1]));
} else {
// use the same pixel as in the left image
kps_right.push_back(kp->position_.pt);
}
}
// track the left-image features into the right image with LK optical flow
std::vector<uchar> status;
Mat error;
cv::calcOpticalFlowPyrLK(
current_frame_->left_img_, current_frame_->right_img_, kps_left,
kps_right, status, error, cv::Size(11, 11), 3,
cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
0.01),
cv::OPTFLOW_USE_INITIAL_FLOW);
// count how many features were matched in the right image
int num_good_pts = 0;
for (size_t i = 0; i < status.size(); ++i) {
if (status[i]) {  // status 1 means a match was found
cv::KeyPoint kp(kps_right[i], 7);  // keypoint position and diameter
Feature::Ptr feat(new Feature(current_frame_, kp));  // create a new feature for the right image
feat->is_on_left_image_ = false;  // mark it as a right-image feature
current_frame_->features_right_.push_back(feat);  // append it to the right-image feature list
num_good_pts++;
} else {
current_frame_->features_right_.push_back(nullptr);  // push nullptr so features_right_ stays index-aligned with features_left_
}
}
LOG(INFO) << "Find " << num_good_pts << " in the right image.";
return num_good_pts;  // return the number of matched points
}
// Build the initial map from the first stereo pair
bool Frontend::BuildInitMap() {
std::vector<SE3> poses{camera_left_->pose(), camera_right_->pose()};
size_t cnt_init_landmarks = 0;
// for each left-right match, triangulate its depth and create a map point observed by both features
for (size_t i = 0; i < current_frame_->features_left_.size(); ++i) {
if (current_frame_->features_right_[i] == nullptr) continue;
// create map point from triangulation
std::vector<Vec3> points{
camera_left_->pixel2camera(
Vec2(current_frame_->features_left_[i]->position_.pt.x,
current_frame_->features_left_[i]->position_.pt.y)),
camera_right_->pixel2camera(
Vec2(current_frame_->features_right_[i]->position_.pt.x,
current_frame_->features_right_[i]->position_.pt.y))};
Vec3 pworld = Vec3::Zero();
// keep only points that triangulate successfully with positive depth
if (triangulation(poses, points, pworld) && pworld[2] > 0) {
auto new_map_point = MapPoint::CreateNewMappoint();
new_map_point->SetPos(pworld);
new_map_point->AddObservation(current_frame_->features_left_[i]);
new_map_point->AddObservation(current_frame_->features_right_[i]);
current_frame_->features_left_[i]->map_point_ = new_map_point;
current_frame_->features_right_[i]->map_point_ = new_map_point;
cnt_init_landmarks++;
map_->InsertMapPoint(new_map_point);
}
}
current_frame_->SetKeyFrame();
map_->InsertKeyFrame(current_frame_);  // insert the current frame into the map as the first keyframe
backend_->UpdateMap();  // trigger a backend map update
LOG(INFO) << "Initial map created with " << cnt_init_landmarks
<< " map points";
return true;
}
// Reset the frontend (not implemented here)
bool Frontend::Reset() {
LOG(INFO) << "Reset is not implemented. ";
return true;
}
} // namespace myslam
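For context, this is roughly how the frontend is driven from the outside: every stereo pair is wrapped in a Frame and handed to AddFrame(), which dispatches on the tracking status. The sketch below is mine, not part of frontend.cpp; it assumes a dataset object with a NextFrame() method returning Frame::Ptr (in the book's full project this role is played by its VisualOdometry and Dataset classes).
#include <memory>
#include "myslam/frame.h"
#include "myslam/frontend.h"
// Hypothetical driver loop; the Dataset type and its NextFrame() are assumptions for illustration.
template <typename Dataset>
void RunSequence(std::shared_ptr<myslam::Frontend> frontend, std::shared_ptr<Dataset> dataset) {
    while (true) {
        myslam::Frame::Ptr new_frame = dataset->NextFrame();  // load the next stereo pair
        if (new_frame == nullptr) break;                      // end of the sequence
        frontend->AddFrame(new_frame);  // INITING / TRACKING_GOOD / TRACKING_BAD / LOST are handled inside
    }
}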