// Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//
//     * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
//       its contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)

#include "estimators/pose.h"

#include "base/camera_models.h"
#include "base/cost_functions.h"
#include "base/essential_matrix.h"
#include "base/pose.h"
#include "estimators/absolute_pose.h"
#include "estimators/essential_matrix.h"
#include "optim/bundle_adjustment.h"
#include "util/matrix.h"
#include "util/misc.h"
#include "util/threading.h"

namespace colmap {
namespace {

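// LO-RANSAC absolute pose estimator: P3P serves as the minimal solver and
// EPnP as the non-minimal solver used for local optimization.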
typedef LORANSAC<P3PEstimator, EPNPEstimator> AbsolutePoseRANSAC;

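// Estimate the absolute pose for a single focal length hypothesis: scale the
// camera's focal length by the given factor, normalize the 2D points with the
// scaled camera, and run LO-RANSAC on the normalized 2D-3D correspondences.
// The result is written to the given report.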
void EstimateAbsolutePoseKernel(const Camera& camera,
                                const double focal_length_factor,
                                const std::vector<Eigen::Vector2d>& points2D,
                                const std::vector<Eigen::Vector3d>& points3D,
                                const RANSACOptions& options,
                                AbsolutePoseRANSAC::Report* report) {
  // Scale the focal length by the given factor.
  Camera scaled_camera = camera;
  const std::vector<size_t>& focal_length_idxs = camera.FocalLengthIdxs();
  for (const size_t idx : focal_length_idxs) {
    scaled_camera.Params(idx) *= focal_length_factor;
  }

  // Normalize image coordinates with current camera hypothesis.
  std::vector<Eigen::Vector2d> points2D_N(points2D.size());
  for (size_t i = 0; i < points2D.size(); ++i) {
    points2D_N[i] = scaled_camera.ImageToWorld(points2D[i]);
  }

  // Estimate pose for given focal length.
  auto custom_options = options;
  custom_options.max_error =
      scaled_camera.ImageToWorldThreshold(options.max_error);
  AbsolutePoseRANSAC ransac(custom_options);
  *report = ransac.Estimate(points2D_N, points3D);
}

}  // namespace

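// Estimate the absolute camera pose from 2D-3D correspondences. If focal
// length estimation is enabled, focal length factors are sampled on a
// quadratic grid (denser towards small focal lengths) and evaluated in
// parallel; the hypothesis with the most inliers determines the returned
// pose, inlier set, and scaled camera focal length.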
bool EstimateAbsolutePose(const AbsolutePoseEstimationOptions& options,
                          const std::vector<Eigen::Vector2d>& points2D,
                          const std::vector<Eigen::Vector3d>& points3D,
                          Eigen::Vector4d* qvec, Eigen::Vector3d* tvec,
                          Camera* camera, size_t* num_inliers,
                          std::vector<char>* inlier_mask) {
  options.Check();

  std::vector<double> focal_length_factors;
  if (options.estimate_focal_length) {
    // Generate focal length factors using a quadratic function,
    // such that more samples are drawn for small focal lengths
    focal_length_factors.reserve(options.num_focal_length_samples + 1);
    const double fstep = 1.0 / options.num_focal_length_samples;
    const double fscale =
        options.max_focal_length_ratio - options.min_focal_length_ratio;
    for (double f = 0; f <= 1.0; f += fstep) {
      focal_length_factors.push_back(options.min_focal_length_ratio +
                                     fscale * f * f);
    }
  } else {
    focal_length_factors.reserve(1);
    focal_length_factors.push_back(1);
  }

  std::vector<std::future<void>> futures;
  futures.resize(focal_length_factors.size());
  std::vector<typename AbsolutePoseRANSAC::Report,
              Eigen::aligned_allocator<typename AbsolutePoseRANSAC::Report>>
      reports;
  reports.resize(focal_length_factors.size());

  ThreadPool thread_pool(std::min(
      options.num_threads, static_cast<int>(focal_length_factors.size())));

  for (size_t i = 0; i < focal_length_factors.size(); ++i) {
    futures[i] = thread_pool.AddTask(
        EstimateAbsolutePoseKernel, *camera, focal_length_factors[i], points2D,
        points3D, options.ransac_options, &reports[i]);
  }

  double focal_length_factor = 0;
  Eigen::Matrix3x4d proj_matrix;
  *num_inliers = 0;
  inlier_mask->clear();

  // Find best model among all focal lengths.
  for (size_t i = 0; i < focal_length_factors.size(); ++i) {
    futures[i].get();
    const auto report = reports[i];
    if (report.success && report.support.num_inliers > *num_inliers) {
      *num_inliers = report.support.num_inliers;
      proj_matrix = report.model;
      *inlier_mask = report.inlier_mask;
      focal_length_factor = focal_length_factors[i];
    }
  }

  if (*num_inliers == 0) {
    return false;
  }

  // Scale output camera with best estimated focal length.
  if (options.estimate_focal_length && *num_inliers > 0) {
    const std::vector<size_t>& focal_length_idxs = camera->FocalLengthIdxs();
    for (const size_t idx : focal_length_idxs) {
      camera->Params(idx) *= focal_length_factor;
    }
  }

  // Extract pose parameters.
  *qvec = RotationMatrixToQuaternion(proj_matrix.leftCols<3>());
  *tvec = proj_matrix.rightCols<1>();

  if (IsNaN(*qvec) || IsNaN(*tvec)) {
    return false;
  }

  return true;
}

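// Estimate the relative pose from normalized 2D-2D correspondences by
// robustly fitting an essential matrix with the five-point algorithm and
// decomposing it into rotation and translation. Returns the number of
// points that triangulate in front of both cameras, or 0 on failure.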
size_t EstimateRelativePose(const RANSACOptions& ransac_options,
                            const std::vector<Eigen::Vector2d>& points1,
                            const std::vector<Eigen::Vector2d>& points2,
                            Eigen::Vector4d* qvec, Eigen::Vector3d* tvec) {
  RANSAC<EssentialMatrixFivePointEstimator> ransac(ransac_options);
  const auto report = ransac.Estimate(points1, points2);

  if (!report.success) {
    return 0;
  }

  std::vector<Eigen::Vector2d> inliers1(report.support.num_inliers);
  std::vector<Eigen::Vector2d> inliers2(report.support.num_inliers);

  size_t j = 0;
  for (size_t i = 0; i < points1.size(); ++i) {
    if (report.inlier_mask[i]) {
      inliers1[j] = points1[i];
      inliers2[j] = points2[i];
      j += 1;
    }
  }

  Eigen::Matrix3d R;

  std::vector<Eigen::Vector3d> points3D;
  PoseFromEssentialMatrix(report.model, inliers1, inliers2, &R, tvec,
                          &points3D);

  *qvec = RotationMatrixToQuaternion(R);

  if (IsNaN(*qvec) || IsNaN(*tvec)) {
    return 0;
  }

  return points3D.size();
}

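// Refine the absolute pose, and optionally the camera intrinsics, by
// minimizing the reprojection error over the inlier 2D-3D correspondences
// with Ceres using a Cauchy loss. The 3D points are held constant and the
// principal point is never refined.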
bool RefineAbsolutePose(const AbsolutePoseRefinementOptions& options,
                        const std::vector<char>& inlier_mask,
                        const std::vector<Eigen::Vector2d>& points2D,
                        const std::vector<Eigen::Vector3d>& points3D,
                        Eigen::Vector4d* qvec, Eigen::Vector3d* tvec,
                        Camera* camera) {
  CHECK_EQ(inlier_mask.size(), points2D.size());
  CHECK_EQ(points2D.size(), points3D.size());
  options.Check();

  ceres::LossFunction* loss_function =
      new ceres::CauchyLoss(options.loss_function_scale);

  double* camera_params_data = camera->ParamsData();
  double* qvec_data = qvec->data();
  double* tvec_data = tvec->data();

  std::vector<Eigen::Vector3d> points3D_copy = points3D;

  ceres::Problem problem;

  for (size_t i = 0; i < points2D.size(); ++i) {
    // Skip outlier observations
    if (!inlier_mask[i]) {
      continue;
    }

    ceres::CostFunction* cost_function = nullptr;

    switch (camera->ModelId()) {
#define CAMERA_MODEL_CASE(CameraModel)                                  \
  case CameraModel::kModelId:                                           \
    cost_function =                                                     \
        BundleAdjustmentCostFunction<CameraModel>::Create(points2D[i]); \
    break;

      CAMERA_MODEL_SWITCH_CASES

#undef CAMERA_MODEL_CASE
    }

    problem.AddResidualBlock(cost_function, loss_function, qvec_data, tvec_data,
                             points3D_copy[i].data(), camera_params_data);
    problem.SetParameterBlockConstant(points3D_copy[i].data());
  }

  if (problem.NumResiduals() > 0) {
    // Quaternion parameterization.
    *qvec = NormalizeQuaternion(*qvec);
    ceres::LocalParameterization* quaternion_parameterization =
        new ceres::QuaternionParameterization;
    problem.SetParameterization(qvec_data, quaternion_parameterization);

    // Camera parameterization.
    if (!options.refine_focal_length && !options.refine_extra_params) {
      problem.SetParameterBlockConstant(camera->ParamsData());
    } else {
      // Always set the principal point as fixed.
      std::vector<int> camera_params_const;
      const std::vector<size_t>& principal_point_idxs =
          camera->PrincipalPointIdxs();
      camera_params_const.insert(camera_params_const.end(),
                                 principal_point_idxs.begin(),
                                 principal_point_idxs.end());

      if (!options.refine_focal_length) {
        const std::vector<size_t>& focal_length_idxs =
            camera->FocalLengthIdxs();
        camera_params_const.insert(camera_params_const.end(),
                                   focal_length_idxs.begin(),
                                   focal_length_idxs.end());
      }

      if (!options.refine_extra_params) {
        const std::vector<size_t>& extra_params_idxs =
            camera->ExtraParamsIdxs();
        camera_params_const.insert(camera_params_const.end(),
                                   extra_params_idxs.begin(),
                                   extra_params_idxs.end());
      }

      if (camera_params_const.size() == camera->NumParams()) {
        problem.SetParameterBlockConstant(camera->ParamsData());
      } else {
        ceres::SubsetParameterization* camera_params_parameterization =
            new ceres::SubsetParameterization(
                static_cast<int>(camera->NumParams()), camera_params_const);
        problem.SetParameterization(camera->ParamsData(),
                                    camera_params_parameterization);
      }
    }
  }

  ceres::Solver::Options solver_options;
  solver_options.gradient_tolerance = options.gradient_tolerance;
  solver_options.max_num_iterations = options.max_num_iterations;
  solver_options.linear_solver_type = ceres::DENSE_QR;

  // The overhead of creating threads is too large.
  solver_options.num_threads = 1;
#if CERES_VERSION_MAJOR < 2
  solver_options.num_linear_solver_threads = 1;
#endif  // CERES_VERSION_MAJOR

  ceres::Solver::Summary summary;
  ceres::Solve(solver_options, &problem, &summary);

  if (solver_options.minimizer_progress_to_stdout) {
    std::cout << std::endl;
  }

  if (options.print_summary) {
    PrintHeading2("Pose refinement report");
    PrintSolverSummary(summary);
  }

  return summary.IsSolutionUsable();
}

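// Refine the relative pose over all given correspondences with Ceres. The
// rotation is parameterized as a unit quaternion and the translation is
// constrained to unit norm via a homogeneous vector parameterization, since
// the relative translation is only defined up to scale.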
bool RefineRelativePose(const ceres::Solver::Options& options,
                        const std::vector<Eigen::Vector2d>& points1,
                        const std::vector<Eigen::Vector2d>& points2,
                        Eigen::Vector4d* qvec, Eigen::Vector3d* tvec) {
  CHECK_EQ(points1.size(), points2.size());

  // CostFunction assumes unit quaternions.
  *qvec = NormalizeQuaternion(*qvec);

  const double kMaxL2Error = 1.0;
  ceres::LossFunction* loss_function = new ceres::CauchyLoss(kMaxL2Error);

  ceres::Problem problem;

  for (size_t i = 0; i < points1.size(); ++i) {
    ceres::CostFunction* cost_function =
        RelativePoseCostFunction::Create(points1[i], points2[i]);
    problem.AddResidualBlock(cost_function, loss_function, qvec->data(),
                             tvec->data());
  }

  ceres::LocalParameterization* quaternion_parameterization =
      new ceres::QuaternionParameterization;
  problem.SetParameterization(qvec->data(), quaternion_parameterization);

  ceres::HomogeneousVectorParameterization* homogeneous_parameterization =
      new ceres::HomogeneousVectorParameterization(3);
  problem.SetParameterization(tvec->data(), homogeneous_parameterization);

  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);

  return summary.IsSolutionUsable();
}

}  // namespace colmap