%YAML:1.0
# ==============================================
# =============== Select dataset ===============
# ==============================================
# Choose one of the following two datasets.
dataset_name: "matlab"
# dataset_name: "fr1_desk"
matlab: # New Tsukuba Stereo dataset, used in the MATLAB tutorial
  # -- Data dir
  dataset_dir: data/dataset_images_matlab
  num_images: 150
  # -- Camera intrinsics (The images should all be undistorted)
  camera_info.fx: 615
  camera_info.fy: 615
  camera_info.cx: 320
  camera_info.cy: 240
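  # Note: fx, fy, cx, cy (in pixels) are the entries of the standard pinhole
  # intrinsic matrix used to project 3D points onto the image:
  #   K = [ fx   0  cx
  #          0  fy  cy
  #          0   0   1 ]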
  # -- Load ground-truth trajectory
  is_draw_true_traj: "true" # The ground-truth trajectory is drawn in green; the estimated trajectory in white.
  true_traj_filename: data/test_data/cam_traj_truth.txt
  # Format of the ground-truth data:
  # Each row contains 12 numbers representing the transformation matrix T = [R|t].
  # Order of the 12 numbers: tx, ty, tz, R[:,0], R[:,1], R[:,2]
  # Equivalently: T[0,3], T[1,3], T[2,3], T[0,0], T[1,0], T[2,0], T[0,1], T[1,1], T[2,1], T[0,2], T[1,2], T[2,2]
  # See "readPoseFromFile" in "io.cpp" for more details.
fr1_desk: # fr1_desk dataset
  # https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats
  # -- Data dir
  dataset_dir: /home/feiyu/Documents/Projects/2018-winter/EECS432_CV_VO/data/dataset_images_fr1_desk
  num_images: 150
  # -- Camera intrinsics (The images should all be undistorted)
  camera_info.fx: 517.3
  camera_info.fy: 516.5
  camera_info.cx: 325.1
  camera_info.cy: 249.7
  # -- Load ground-truth trajectory
  is_draw_true_traj: "false"
# ================================================
# =============== General settings ===============
# ================================================
max_num_imgs_to_proc: 300
# is_pcl_wait_for_keypress: "true" # If true, the PCL Viewer pauses after each update and waits for a keypress.
is_pcl_wait_for_keypress: "false"
cv_waitkey_time: 1
save_predicted_traj_to: data/test_data/cam_traj.txt
output_folder: "output"
# ==============================================================
# =============== Parameters of Visual Odometry ===============
# ==============================================================
# ------------------- Feature Extraction -------------------
# ORB settings
number_of_keypoints_to_extract: 8000
max_number_of_keypoints: 1500
scale_factor: 1.2
level_pyramid: 4
score_threshold: 20
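# Note: assuming scale_factor and level_pyramid map to OpenCV ORB's scaleFactor and
# nLevels, keypoints are detected on 4 pyramid levels, each downscaled by 1.2x
# relative to the previous one (relative scales 1, 1.2, 1.44, 1.728).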
# ------------------- Feature Matching -------------------
feature_match_method_index_initialization: 1
feature_match_method_index_triangulation: 1
feature_match_method_index_pnp: 1
# Matching descriptors
feature_match_method_index: 3
# method 1: the method in Dr. Xiang Gao's slambook:
#     distance_threshold = max<float>(min_dis * match_ratio, 30.0);
# method 2: the ratio test in Lowe's 2004 SIFT paper:
#     min_dis / second_min_dis < 0.8
# method 3: match a point only against its neighboring points, then apply Dr. Xiang Gao's criterion.
xiang_gao_method_match_ratio: 2 # For method 1 (Dr. Xiang Gao's slambook).
lowe_method_dist_ratio: 0.8 # For method 2 (Lowe's 2004 SIFT paper).
method_3_feature_dist_threshold: 50.0 # For method 3.
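# Worked example for method 1 (hypothetical numbers): if the smallest descriptor
# distance among the matches is min_dis = 12, then with match_ratio = 2 the
# threshold is max(12 * 2, 30.0) = 30.0, and matches farther than that are rejected.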
# Method 3 parameters:
max_matching_pixel_dist_in_initialization: 100
max_matching_pixel_dist_in_triangulation: 100
max_matching_pixel_dist_in_pnp: 50
# Uniform keypoint selection over a grid (also helps to remove wrong matches)
kpts_uniform_selection_grid_size: 16
kpts_uniform_selection_max_pts_per_grid: 8
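# E.g., assuming kpts_uniform_selection_grid_size is the cell size in pixels, a
# 640x480 image gives 40 x 30 cells, and keeping at most 8 keypoints per cell
# caps the total at 40 * 30 * 8 = 9600 keypoints.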
# ------------------- RANSAC Essential matrix -------------------
findEssentialMat_prob: 0.999
findEssentialMat_threshold: 1.0
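# These presumably map to the "prob" and "threshold" arguments of cv::findEssentialMat:
# the RANSAC confidence level and the maximum point-to-epipolar-line distance (in pixels)
# for a correspondence to be counted as an inlier.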
# ------------------- Triangulation -------------------
min_triang_angle: 1.0
max_ratio_between_max_angle_and_median_angle: 20
# ------------------- Initialization -------------------
min_inlier_matches: 15
min_pixel_dist: 50
min_median_triangulation_angle: 2.0
assumed_mean_pts_depth_during_vo_init: 0.8
# ------------------- Tracking -------------------
min_dist_between_two_keyframes: 0.03
max_possible_dist_to_prev_keyframe: 0.3
# ------------------- Optimization -------------------
is_enable_ba: "true" # Use bundle adjustment for the camera pose and points of a single frame ("true" or "false").
num_prev_frames_to_opti_by_ba: 5 # Must be <= 20: "kBuffSize_" in "vo.h" is set to 20, so only the previous 20 frames are stored.
information_matrix: "1.0 0.0 0.0 1.0"
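# Note: the four numbers above are presumably the row-major entries of a 2x2
# information (inverse-covariance) matrix for the pixel reprojection error,
# i.e. the identity matrix here:
#   [ 1.0  0.0
#     0.0  1.0 ]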
is_ba_fix_map_points: "true" # TO DEBUG: optimizing both the camera poses and the map points gives a huge error, so the map points are kept fixed for now.
# UPDATE_MAP_PTS: "" # This equals (!is_ba_fix_map_points) by default