[libcvd-members] libcvd/progs calibrate.cxx


From: Ethan Eade
Subject: [libcvd-members] libcvd/progs calibrate.cxx
Date: Mon, 30 Jun 2008 18:46:10 +0000

CVSROOT:        /cvsroot/libcvd
Module name:    libcvd
Changes by:     Ethan Eade <ethaneade>  08/06/30 18:46:10

Modified files:
        progs          : calibrate.cxx 

Log message:
        Fixed and improved calibration program.
        
        The optimization of the internal camera parameters is now formulated
        correctly and converges reliably, so it is enabled by default.
        
        The Levenberg-Marquardt optimization has been rewritten and now works
        correctly.
        
        Several unnecessary dependencies have been removed, along with a large
        amount of unused code.
        
        This code depends on an up-to-date se3.h in TooN for the Jacobian
        functions.
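
For readers skimming the patch: the rewritten optimizers use a standard
Levenberg-Marquardt accept/reject schedule, halving the damping factor lambda
after a step that reduces the residual and rolling the estimate back while
increasing lambda otherwise (see find_pose_and_params and the main loop in the
diff below). The stand-alone sketch that follows illustrates that schedule on a
made-up two-parameter least-squares fit; the toy model, data and identifiers
are illustrative only and are not part of the patch.

    // Minimal Levenberg-Marquardt sketch (illustrative; it mirrors the
    // accept/reject damping schedule used in the patched calibrate.cxx,
    // it is not the patch itself).
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main()
    {
        // Synthetic samples of y = 2*exp(-t); we fit the model a*exp(b*t).
        std::vector<double> t, y;
        for (int i = 0; i < 20; ++i) {
            t.push_back(0.1 * i);
            y.push_back(2.0 * std::exp(-t.back()));
        }

        double a = 1.0, b = 0.0;   // initial guess
        double lambda = 1.0;       // LM damping factor

        // Sum of squared residuals for a candidate (a, b).
        auto sum_sq = [&](double pa, double pb) {
            double s = 0;
            for (std::size_t i = 0; i < t.size(); ++i) {
                double r = y[i] - pa * std::exp(pb * t[i]);
                s += r * r;
            }
            return s;
        };

        double best = sum_sq(a, b);
        // Same stopping rule as the outer loop in calibrate.cxx: stop once the
        // damping factor has grown past a large threshold (plus an iteration cap).
        for (int iter = 0; iter < 200 && lambda < 1e12; ++iter) {
            // Accumulate the 2x2 normal equations J^T J and J^T r.
            double JTJ[2][2] = {{0, 0}, {0, 0}}, JTr[2] = {0, 0};
            for (std::size_t i = 0; i < t.size(); ++i) {
                double e  = std::exp(b * t[i]);
                double J0 = e, J1 = a * t[i] * e;   // d(model)/da, d(model)/db
                double r  = y[i] - a * e;
                JTJ[0][0] += J0 * J0; JTJ[0][1] += J0 * J1;
                JTJ[1][0] += J1 * J0; JTJ[1][1] += J1 * J1;
                JTr[0] += J0 * r;     JTr[1] += J1 * r;
            }
            // Damp the diagonal and solve the 2x2 system in closed form.
            JTJ[0][0] += lambda; JTJ[1][1] += lambda;
            double det = JTJ[0][0] * JTJ[1][1] - JTJ[0][1] * JTJ[1][0];
            double da  = ( JTJ[1][1] * JTr[0] - JTJ[0][1] * JTr[1]) / det;
            double db  = (-JTJ[1][0] * JTr[0] + JTJ[0][0] * JTr[1]) / det;

            double err = sum_sq(a + da, b + db);
            if (err < best) {          // accept: keep the step, relax the damping
                a += da; b += db; best = err; lambda *= 0.5;
            } else {                   // reject: keep the old estimate, damp harder
                lambda *= 10;
            }
        }
        std::printf("a = %.4f, b = %.4f, rss = %.3e\n", a, b, best);
        return 0;
    }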

CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/libcvd/progs/calibrate.cxx?cvsroot=libcvd&r1=1.4&r2=1.5

Patches:
Index: calibrate.cxx
===================================================================
RCS file: /cvsroot/libcvd/libcvd/progs/calibrate.cxx,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -b -r1.4 -r1.5
--- calibrate.cxx       12 May 2008 17:52:05 -0000      1.4
+++ calibrate.cxx       30 Jun 2008 18:46:10 -0000      1.5
@@ -2,10 +2,8 @@
 #include <iostream>
 #include <fstream>
 #include <sstream>
-#include <deque>
 
 #include <TooN/helpers.h>
-#include <TooN/wls.h>
 #include <TooN/Cholesky.h>
 #include <TooN/se3.h>
 
@@ -14,10 +12,9 @@
 #include <cvd/videosource.h>
 #include <cvd/gl_helpers.h>
 #include <cvd/vision.h>
-#include <cvd/draw.h>
 #include <cvd/image_io.h>
 #include <cvd/image_interpolate.h>
-#include <cvd/videodisplay.h>
+#include <cvd/glwindow.h>
 #include <cvd/random.h>
 #include <cvd/timer.h>
 #include <cvd/colourspaces.h>
@@ -44,43 +41,14 @@
 #else
 string videoDevice = "v4l2:///dev/video0";
 #endif
-Vector<6> cameraParameters = (make_Vector, 1000,  1000,  320,  240,  0,  0);
+Vector<6> cameraParameters = (make_Vector, 500, 500,  320,  240,  0,  0);
 int bottomLeftWhite = 1;
 int gridx = 11;
 int gridy = 7;
 double cellSize = 0.02;
 double interval = 0.5;
-string input, output;
 int generateErrorImage = 0;
-
-template <class T>
-std::ostream& operator << (std::ostream &os, Image<T> im)
-{
-  ImageRef size = im.size();
-  for (int i = 0; i < size.x; i++)
-    {
-      for (int j = 0; j < size.y; j++)
-        {
-          ImageRef temp (i, j);
-          std::cout << im[temp] << "     ";
-        }
-      std::cout << std::endl;
-    }
-
-  return os;
-}
-
-template <class A1, class A2>
-void makeProjectionParameterJacobian(const FixedVector<3,A1>& x_se3, FixedMatrix<2,6,A2>& J_pose)
-{
-  double z_inv = 1.0/x_se3[2];
-  double z_inv_sq = z_inv*z_inv;
-  double cross = x_se3[0]*x_se3[1] * z_inv_sq;
-  J_pose[0][0] = z_inv; J_pose[0][1] = 0; J_pose[0][2] = -x_se3[0]*z_inv_sq;
-  J_pose[0][3] = -cross;  J_pose[0][4] = 1 + x_se3[0]*x_se3[0]*z_inv_sq;  J_pose[0][5] = -x_se3[1]*z_inv;
-  J_pose[1][0] = 0; J_pose[1][1] =  z_inv; J_pose[1][2] = -x_se3[1]*z_inv_sq;
-  J_pose[1][3] = -1 - x_se3[1]*x_se3[1]*z_inv_sq; J_pose[1][4] =  cross;  J_pose[1][5] =  x_se3[0]*z_inv;
-}
+string output;
 
 void drawGrid (vector<Vector<2> > grid, int cols, int rows)
 {
@@ -125,100 +93,107 @@
 }
 
 template <class CamModel, class P>
-CVD::SE3 findSE3(const CVD::SE3& start, const vector<Vector<3> >& x, const vector<pair<size_t,P> >& p, const CamModel& camModel, Matrix<6>& Cinv, double noise)
+SE3 find_pose(const SE3& start, const vector<Vector<3> >& x, const vector<pair<size_t,P> >& p, const CamModel& camModel, double noise)
 {
-  Matrix<2> Rinv;
-  Identity(Rinv, camModel.get_parameters()[0] * camModel.get_parameters()[0]/noise);
+    vector<pair<Vector<2>, Matrix<2> > > unprojected(p.size());
+    for (size_t i=0; i<p.size(); ++i) {
+       unprojected[i] = camModel.unproject(p[i].second, Identity<2>(noise));
+    }
 
-  CVD::SE3 bestSE3;
+    SE3 bestSE3;
   double bestError = numeric_limits<double>::max();
   vector<Vector<2> > up(p.size());
   for (size_t i=0; i<p.size(); i++)
     up[i] = camModel.unproject(p[i].second);
-  CVD::SE3 se3;
+    SE3 se3;
   se3 = start;
-  int iter = 0;
-  while (++iter < 20)
-    {
-      WLS<6> wls;
-      wls.clear();
-      wls.add_prior(0.01);
-      for (unsigned int i=0; i<p.size(); i++)
-        {
-          Vector<3> wx = se3*x[p[i].first];
-          Vector<2> camFrame = project(wx);
-          Matrix<2,6> J;
-          makeProjectionParameterJacobian(wx, J);
-          Vector<2> delta = up[i]-camFrame;
-          wls.add_df(delta, J.T(), Rinv);
-        }
-      wls.compute();
-      Vector<6> change = wls.get_mu();
-      se3 = CVD::SE3::exp(change)*se3;
-      double error = 0;
-      for (size_t i=0; i<p.size(); i++)
-        {
-          Vector<3> cx = se3*x[p[i].first];
-          Vector<2> pp = project(cx);
-          error += (pp-up[i])*(pp-up[i]);
+
+    for (int iter=0; iter<4; ++iter) {
+       Matrix<6> I = zeros<6,6>();
+       Vector<6> b = zeros<6>();
+       for (size_t i=0; i<p.size(); ++i) {
+           Matrix<2,3> J_x;
+           Matrix<2,6> J_pose;
+           Vector<2> v = unprojected[i].first - transform_and_project(se3, x[p[i].first], J_x, J_pose);
+           const Matrix<2> Rinv = inverse(unprojected[i].second);
+           transformCovariance<util::PlusEquals>(J_pose.T(), Rinv, I);
+           b += J_pose.T() * (Rinv * v);
+       }
+       Cholesky<6> chol(I);
+       assert(chol.get_rank() == 6);
+       se3.left_multiply_by(SE3::exp(chol.inverse_times(b)));
+
+       double residual = 0;
+       for (size_t i=0; i<p.size(); ++i) {
+           Vector<2> v = unprojected[i].first - transform_and_project(se3, x[p[i].first]);
+           const Matrix<2> Rinv = inverse(unprojected[i].second);
+           residual += v * (Rinv * v);
         }
-      if (error < bestError)
-        {
-          bestError = error;
+       if (residual < bestError) {
+           bestError = residual;
           bestSE3 = se3;
-          Cinv = wls.get_C_inv();
-        }
-      else
-        {
-          //std::cerr << "findSE3 is diverging!\n";
-          break;
         }
     }
   return bestSE3;
 }
 
 template <class CamModel, class P>
-void findParams(const CVD::SE3& pose, const vector<Vector<3> >& x, const vector<pair<size_t,P> >& p, CamModel& camModel, double noise)
+SE3 find_pose_and_params(const SE3& start, const vector<Vector<3> >& x, const vector<pair<size_t,P> >& p, CamModel& camModel, double noise)
 {
-  static const int np = CamModel::num_parameters;
-  Vector<np> bestParams = camModel.get_parameters();
-  double bestError = numeric_limits<double>::max();
-  Matrix<2> Rinv;
-  Identity(Rinv, 1.0/noise);
-  int iter = 0;
-  while (++iter < 20)
-    {
-      WLS<6> wls;
-      wls.clear();
-      double preError = 0;
-      for (unsigned int i=0; i<p.size(); i++)
-        {
-          Vector<3> wx = pose*x[p[i].first];
-          Vector<2> plane = project(wx);
-          Vector<2> im = camModel.project(plane);
-          Vector<2> delta = p[i].second - im;
-          preError += delta*delta;
-          wls.add_df(delta, camModel.get_parameter_derivs(), Rinv);
-        }
-      if (preError < bestError)
-        bestError = preError;
-      wls.compute();
-      camModel.get_parameters() += wls.get_mu();
-      double error = 0;
-      for (size_t i=0; i<p.size(); i++)
-        {
-          Vector<3> cx = pose*x[p[i].first];
-          Vector<2> im = camModel.project(project(cx));
-          Vector<2> diff = im - p[i].second;
-          error += diff*diff;
-        }
-      if (error < bestError)
-        {
-          bestError = error;
-          bestParams = camModel.get_parameters();
+    static const int NCP = CamModel::num_parameters;
+    static const int NP = NCP + 6;
+    const Matrix<2> Rinv = Identity<2>(1.0/noise);
+    SE3 best_pose = start;
+    Vector<NCP> best_params = camModel.get_parameters();
+    SE3 pose = start;
+  
+    double residual = 0;
+    for (size_t i=0; i<p.size(); ++i) {
+       Vector<2> v = p[i].second - camModel.project(transform_and_project(pose, x[p[i].first]));
+       residual += v*Rinv*v;
+    }
+    double best_residual = residual;
+
+    double lambda = 1.0;
+    for (int iter=0; iter<10; ++iter) { 
+       Matrix<NP> I;
+       Identity(I, lambda);
+       Vector<NP> b = zeros<NP>();
+       for (size_t i=0; i<p.size(); ++i) {
+           Matrix<2,3> J_x;
+           Matrix<2,6> J_pose;
+           Vector<2> v = p[i].second - camModel.project(transform_and_project(pose, x[p[i].first], J_x, J_pose));
+           Matrix<2,NP> J;
+           J.template slice<0,0,2,NCP>() = camModel.get_parameter_derivs().T();
+           J.template slice<0,NCP,2,6>() = camModel.get_derivative() * J_pose;
+         
+           transformCovariance<util::PlusEquals>(J.T(), Rinv, I);
+           b += J.T() * (Rinv * v);
+       }
+       Cholesky<NP> chol(I);
+       assert(chol.get_rank() == NP);
+       Vector<NP> delta = chol.inverse_times(b);
+       camModel.get_parameters() += delta.template slice<0,NCP>();
+       pose.left_multiply_by(SE3::exp(delta.template slice<NCP,6>()));
+
+       double residual = 0;
+       for (size_t i=0; i<p.size(); ++i) {
+           Vector<2> v = p[i].second - camModel.project(transform_and_project(pose, x[p[i].first]));
+           residual += v*Rinv*v;
+       }
+       if (residual < best_residual) {
+           best_residual = residual;
+           best_pose = pose;
+           best_params = camModel.get_parameters();
+           lambda *= 0.5;
+       } else {
+           pose = best_pose;
+           camModel.get_parameters() = best_params;
+           lambda *= 1e2;
         }
     }
-  camModel.get_parameters() = bestParams;
+    camModel.get_parameters() = best_params;
+    return best_pose;
 }
 
 void getOptions(int argc, char* argv[])
@@ -227,8 +202,6 @@
       string arg = argv[i];
       if (arg == "-o") {
           output = argv[++i];
-      } else if (arg == "-i") {
-          input = argv[++i];
       } else if (arg == "-d") {
           videoDevice = argv[++i];
       } else if (arg == "-x") {
@@ -245,7 +218,7 @@
       } else if (arg == "-e") {
           generateErrorImage = 1;
       } else {
-        cout << argv[0] << " [options]\n\n"
+           cerr << argv[0] << " [options]\n\n"
         "Move virtual grid over real grid until it snaps on.\n"
         "Record a number of frames ~100.\n"
         "Press SPACE to calculate camera parameters.\n\n"
@@ -256,15 +229,13 @@
         "  -y  grid dimension in y (height) direction\t7\n"
         "  -s  grid cell size\t\t\t\t0.02 m\n"
         "  -t  interval between captured frames\t\t0.5 s\n"
-        "  -c  initial camera parameters\t\t\t1000 1000 320 240 0 0\n"
+               "  -c  initial camera parameters\t\t\t" << cameraParameters << "\n"
         "  -e  generate image showing errors per pixel\n";
         exit(0);
       }
     }
-  if (input.length() != 0)
-    return;
   try {
-        cout << "opening " << videoDevice << endl;
+        cerr << "opening " << videoDevice << endl;
         videoBuffer = open_video_source<CAMERA_PIXEL>(videoDevice);
   }
   catch (CVD::Exceptions::All& e) {
@@ -302,10 +273,10 @@
 }
 
 struct MeasurementSet
-  {
+{
     vector<Vector<2> > im;
     vector<Vector<3> > world;
-  };
+};
 
 template <class CM>
 double getReprojectionError(const vector<MeasurementSet>& ms, const vector<SE3>& poses, CM& cm)
@@ -363,82 +334,14 @@
 }
 
 template <class CM>
-void improve(vector<MeasurementSet>& ms, vector<SE3>& pose, CM& cm, double epsilon)
-{
-  Vector<CM::num_parameters> J_param;
-  zero(J_param);
-  for (size_t i=0; i<ms.size(); i++)
-    {
-      Vector<6> J_pose_sum;
-      Zero(J_pose_sum);
-      for (size_t j=0; j<ms[i].im.size(); j++)
-        {
-          Vector<3> camFrame = pose[i] * ms[i].world[j];
-          Vector<2> im = cm.project(project(camFrame));
-          Vector<2> v = ms[i].im[j] - im;
-          Matrix<2,6> J_pose;
-          makeProjectionParameterJacobian(camFrame, /*pose[i],*/ J_pose);
-          J_pose_sum += (cm.get_derivative() * J_pose).T() * v;
-          J_param += cm.get_parameter_derivs(v);
-        }
-      pose[i] = SE3::exp(J_pose_sum * epsilon) * pose[i];
-    }
-  cm.get_parameters() += epsilon * J_param;
-}
-
-template <class CM>
-void improvePoses(const vector<MeasurementSet>& ms, vector<SE3>& pose, const CM& cm)
-{
-  for (size_t i=0; i<ms.size(); i++)
-    {
-      WLS<6> wls;
-      wls.clear();
-      wls.add_prior(1e-3);
-      for (size_t j=0; j<ms[i].im.size(); j++)
-        {
-          Vector<3> camFrame = pose[i] * ms[i].world[j];
-          Vector<2> im = cm.project(project(camFrame));
-          Vector<2> v = ms[i].im[j] - im;
-          Matrix<2,6> J_pose;
-          makeProjectionParameterJacobian(camFrame, /*pose[i],*/ J_pose);
-          J_pose = cm.get_derivative() * J_pose;
-          wls.add_df(v[0], J_pose[0]);
-          wls.add_df(v[1], J_pose[1]);
-        }
-      wls.compute();
-      pose[i] = SE3::exp(wls.get_mu()) * pose[i];
-    }
-}
-
-template <class CM>
-void improveParams(const vector<MeasurementSet>& ms, const vector<SE3>& pose, CM& cm)
-{
-  WLS<CM::num_parameters> wls;
-  wls.clear();
-  wls.add_prior(1e-3);
-  for (size_t i=0; i<ms.size(); i++)
-    {
-      for (size_t j=0; j<ms[i].im.size(); j++)
-        {
-          Vector<3> camFrame = pose[i] * ms[i].world[j];
-          Vector<2> im = cm.project(project(camFrame));
-          Vector<2> v = ms[i].im[j] - im;
-          Matrix<2,CM::num_parameters> J_param = cm.get_parameter_derivs().T();
-          wls.add_df(v[0], J_param[0]);
-          wls.add_df(v[1], J_param[1]);
-        }
-    }
-  wls.compute();
-  cm.get_parameters() += wls.get_mu();
-}
-
-template <class CM>
 void improveLM(vector<MeasurementSet>& ms, vector<SE3>& pose, CM& cm, double lambda)
 {
   Matrix<> JTJ(CM::num_parameters+ms.size()*6,CM::num_parameters+ms.size()*6);
   Vector<> JTe(JTJ.num_rows());
+
   Zero(JTJ);
-  Zero(JTe);
+    Vector<CM::num_parameters> JTep = zeros<6>();
+
   for (size_t i=0; i<ms.size(); i++)
     {
       Matrix<6> poseBlock;
@@ -447,33 +350,41 @@
       Zero(poseBlock);
       Zero(paramBlock);
       Zero(offDiag);
+      
+       Vector<6> JTei = zeros<6>();
+      
       for (size_t j=0; j<ms[i].im.size(); j++)
         {
           Vector<3> camFrame = pose[i] * ms[i].world[j];
-          Vector<2> im = cm.project(project(camFrame));
-          Vector<2> v = ms[i].im[j] - im;
+           Matrix<2,3> J_x;
           Matrix<2,6> J_pose;
-          makeProjectionParameterJacobian(camFrame, /*pose[i],*/ J_pose);
+           Vector<2> v = ms[i].im[j] - cm.project(transform_and_project(pose[i], ms[i].world[j], J_x, J_pose));
+           //Vector<2> im = cm.project(project(camFrame));
+           //Vector<2> v = ms[i].im[j] - im;
+           //makeProjectionParameterJacobian(camFrame, /*pose[i],*/ J_pose);
           J_pose = cm.get_derivative() * J_pose;
           Matrix<2,CM::num_parameters> J_param = cm.get_parameter_derivs().T();
           poseBlock += J_pose.T()*J_pose;
           paramBlock += J_param.T() * J_param;
           offDiag += J_param.T() * J_pose;
-          JTe.slice(CM::num_parameters + i*6, 6) = JTe.slice(CM::num_parameters + i*6, 6) + J_pose.T() * v;
-          JTe.slice(0,CM::num_parameters) = JTe.slice(0,CM::num_parameters) + J_param.T() * v;
+           JTei += J_pose.T() * v;
+           JTep += J_param.T() * v;
         }
+       JTe.slice(CM::num_parameters + i*6, 6) = JTei;
       JTJ.slice(CM::num_parameters + i*6, CM::num_parameters + i*6, 6,6) = poseBlock;
-      JTJ.slice(0,0,CM::num_parameters, CM::num_parameters) = JTJ.slice(0,0,CM::num_parameters, CM::num_parameters) + paramBlock;
+       JTJ.slice(0,0,CM::num_parameters, CM::num_parameters) += paramBlock;
       JTJ.slice(0,CM::num_parameters + i*6, CM::num_parameters, 6) = offDiag;
-      JTJ.slice(CM::num_parameters + i*6,0, 6, CM::num_parameters) = offDiag.T();
+       //JTJ.slice(CM::num_parameters + i*6,0, 6, CM::num_parameters) = offDiag.T();
     }
+    JTe.slice(0,CM::num_parameters) = JTep;
+  
   for (int i=0; i<JTJ.num_rows(); i++)
     JTJ[i][i] += lambda;
   Vector<> delta = Cholesky<>(JTJ).backsub(JTe);
   cm.get_parameters() += delta.template slice<0,CM::num_parameters>();
   for (size_t i=0; i<pose.size(); i++)
     {
-      pose[i] = SE3::exp(delta.slice(CM::num_parameters+i*6, 6)) * pose[i];
+       pose[i].left_multiply_by(SE3::exp(delta.slice(CM::num_parameters+i*6, 6)));
     }
 }
 
@@ -492,11 +403,10 @@
       Zero(offDiag);
       for (size_t j=0; j<ms[i].im.size(); j++)
         {
-          Vector<3> camFrame = pose[i] * ms[i].world[j];
-          Vector<2> im = cm.project(project(camFrame));
-          Vector<2> v = ms[i].im[j] - im;
+           Matrix<2,3> J_x;
           Matrix<2,6> J_pose;
-          makeProjectionParameterJacobian(camFrame, /*pose[i],*/ J_pose);
+           Vector<2> v = ms[i].im[j] - cm.project(transform_and_project(pose[i], ms[i].world[j], J_x, J_pose));
+
           J_pose = cm.get_derivative() * J_pose;
           Matrix<2,CM::num_parameters> J_param = cm.get_parameter_derivs().T();
           poseBlock += J_pose.T()*J_pose;
@@ -508,58 +418,15 @@
       JTJ.slice(0,CM::num_parameters + i*6, CM::num_parameters, 6) = offDiag;
       JTJ.slice(CM::num_parameters + i*6,0, 6, CM::num_parameters) = offDiag.T();
     }
-  Matrix<> Cov = LU<>(JTJ).get_inverse();
-  C = Cov.template slice<0,0,CM::num_parameters, CM::num_parameters>();
-}
-
-template <int N>
-Vector<N> sampleUnitSphere()
-{
-  Vector<N> s;
-  do
-    {
-      for (int i=0; i<N; i++)
-        s[i] = drand48()*2 - 1.0;
-    }
-  while (s*s > 1);
-  return s;
-}
-
-void saveData(ostream& out, ImageRef size, double factor, const Vector<6>& parameters, const vector<MeasurementSet>& ms, const vector<SE3>& poses)
-{
-  out << size.x << " \t" << size.y << endl;
-  out << factor << endl;
-  out << parameters << endl;
-  out << ms.size() << endl;
-  for (size_t i=0; i<ms.size(); i++)
-    {
-      out << poses[i].ln() << endl;
-      out << ms[i].im.size() << endl;
-      for (size_t j=0; j<ms[i].im.size(); j++)
-        {
-          out << ms[i].im[j] << endl;
-          out << ms[i].world[j] << endl;
-        }
-    }
-}
-
-void loadData(istream& in, ImageRef& size, double& factor, Vector<6>& params, vector<MeasurementSet>& ms, vector<SE3>& poses)
-{
-  size_t views;
-  in >> size.x >> size.y >> factor >> params >> views;
-  ms.resize(views);
-  poses.resize(views);
-  for (size_t i=0; i<views; i++)
-    {
-      Vector<6> ln;
-      in >> ln;
-      poses[i] = SE3::exp(ln);
-      size_t points;
-      in >> points;
-      ms[i].im.resize(points);
-      ms[i].world.resize(points);
-      for (size_t j=0; j<points; j++)
-        in >> ms[i].im[j] >> ms[i].world[j];
+    Cholesky<> chol(JTJ);
+    Vector<> v(JTJ.num_cols());
+    Zero(v);
+  
+    for (int i=0; i<CM::num_parameters; ++i) {
+       v[i]=1;
+       Vector<> Cv = chol.inverse_times(v);
+       v[i]=0;
+       C.T()[i] = Cv.template slice<0,CM::num_parameters>();
     }
 }
 
@@ -774,26 +641,26 @@
 {
   getOptions(argc, argv);
 
-  VideoDisplay disp (0.0, 0.0, 640.0, 480.0);
+    string titlePrefix = "Calibrate: Align grid ([ and ] to resize)";
+    GLWindow disp(ImageRef(640,480), titlePrefix);
+    GLWindow::EventSummary events;
+    //VideoDisplay disp (0.0, 0.0, 640.0, 480.0);
   Camera::Quintic cameraModel;
   double factor=1.0;
   vector<MeasurementSet> measurementSets;
   vector<SE3> poses;
   ImageRef imageSize;
 
-  XEvent e;
-  disp.select_events(KeyPressMask);
-  bool doParams = false;
+    //XEvent e;
+    //disp.select_events(KeyPressMask);
+    bool doParams = true;
   int stage = 2; // 2 is init, 1 record, 0 done
 
-  string titlePrefix = "Calibrate: Align grid ([ and ] to resize)";
-  disp.set_title(titlePrefix);
+    //disp.set_title(titlePrefix);
 
   double curr = timer.get_time();
   srand48(static_cast<long int>(curr));
   srand(static_cast<unsigned int>(curr));
-  if (!input.length())
-    {
       imageSize = videoBuffer->size();
 
       cameraModel.get_parameters() = cameraParameters;
@@ -806,6 +673,9 @@
         grid3d[i] = unproject(grid[i]);
 
       SE3 pose;
+    pose.get_translation()[2] = -0.7;
+    const SE3 init_pose = pose;
+
       glPixelStorei(GL_UNPACK_ALIGNMENT,1);
       glDrawBuffer(GL_BACK);
       glEnable(GL_BLEND);
@@ -815,41 +685,34 @@
       bool prevFrameTracked = false;
       while (stage)
         {
-          // handle X events
-          while(disp.pending())
-            {
-              disp.get_event(&e);
-              if(e.type == KeyPress)
-                {
-                  KeySym k;
-                  XLookupString(&e.xkey, 0, 0, &k, 0);
-                  if(k == XK_Home)
-                    {
-                      pose = SE3::exp((make_Vector, 0, 0, 0, 0, 0, 0));
-                    }
-                  else if(k == XK_bracketleft)
-                    {
+           
+       events.clear();
+       disp.get_events(events);
+
+       for (GLWindow::EventSummary::key_iterator it = events.key_down.begin(); it!=events.key_down.end(); ++it) {
+           switch (it->first) {
+           case XK_Home:
+               pose = init_pose;
+               break;
+           case XK_bracketleft:
                       pose.get_translation()[2] += 0.02;
-                    }
-                  else if(k == XK_bracketright)
-                    {
+               break;
+           case XK_bracketright:
                       pose.get_translation()[2] -= 0.02;
-                    }
-                  else if(k == XK_Escape || k == XK_q || k == XK_Q)
-                    {
-                      return 0;
-                    }
-                  else if(k == XK_space)
-                    {
-                      if(stage == 1)
-                        {
+               break;
+           case XK_Escape:
+           case XK_q:
+           case XK_Q:
+               exit(0);
+               break;
+           case XK_space:
+               if(stage == 1) {
                           stage = 0;
                           titlePrefix = "Calibrate: Calculating Camera Parameters";
                           disp.set_title(titlePrefix);
                         }
-                    }
-                  else if(k == XK_p)
-                    {
+               break;
+           case XK_p:
                       doParams = !doParams;
                       if(stage == 1)
                                          {
@@ -858,7 +721,7 @@
                         else
                           titlePrefix = "Calibrate: Tracking Pose";
                                          }
-                    }
+               break;
                 }
             }
 
@@ -868,9 +731,7 @@
               videoBuffer->put_frame(vframe);
               vframe = videoBuffer->get_frame();
             }
-          Image<byte> temp = convert_image(*vframe);
-          Image<float> gray = convert_image(temp);
-          videoBuffer->put_frame(vframe);
+       Image<float> gray = convert_image<float>(*vframe);
 
           glDisable(GL_BLEND);
           glEnable(GL_TEXTURE_RECTANGLE_NV);
@@ -878,26 +739,28 @@
           glTexParameterf( GL_TEXTURE_RECTANGLE_NV, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
           glTexParameterf( GL_TEXTURE_RECTANGLE_NV, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
           glPixelStorei(GL_UNPACK_ALIGNMENT,1);
-          glTexImage2D( temp, 0, GL_TEXTURE_RECTANGLE_NV);
+       glTexImage2D(*vframe, 0, GL_TEXTURE_RECTANGLE_NV);
           glBegin(GL_QUADS);
               glTexCoord2i(0, 0);
               glVertex2i(0,0);
-              glTexCoord2i(temp.size().x, 0);
+       glTexCoord2i(vframe->size().x, 0);
               glVertex2i(640,0);
-              glTexCoord2i(temp.size().x,temp.size().y);
+       glTexCoord2i(vframe->size().x,vframe->size().y);
               glVertex2i(640,480);
-              glTexCoord2i(0, temp.size().y);
+       glTexCoord2i(0, vframe->size().y);
               glVertex2i(0, 480);
           glEnd ();
           glDisable(GL_TEXTURE_RECTANGLE_NV);
           glEnable(GL_BLEND);
 
+       videoBuffer->put_frame(vframe);
+
           //this is the bit that does the calibrating
           vector<pair<size_t, Vector<2> > > measurements;
           vector<Vector<2> > failedPoints;
 
           //smooth and prepare image for interpolation
-          convolveGaussian5_1(gray);
+       convolveGaussian(gray, 0.5);
           image_interpolate<Interpolate::Bilinear, float> imgInter =
             image_interpolate<Interpolate::Bilinear, float>(gray);
 
@@ -1064,10 +927,10 @@
                     titlePrefix = "Calibrate: Tracking Pose";
                 }
 
-              Matrix<6> Cinv;
-              pose = findSE3(pose, grid3d, measurements, cameraModel, Cinv, factor*factor);
               if (doParams)
-                findParams(pose, grid3d, measurements, cameraModel, factor*factor);
+               pose = find_pose_and_params(pose, grid3d, measurements, cameraModel, factor*factor);
+           else
+               pose = find_pose(pose, grid3d, measurements, cameraModel, factor*factor);
 
               double now = timer.get_time();
               if (now - lastMeasure > interval)
@@ -1136,33 +999,26 @@
           glEnd();
           disp.swap_buffers();
         }
-    }
-  /*if (!input.length() && output.length()) {
-      ofstream fout(output.c_str());
-      fout.precision(19);
-      saveData(fout, imageSize, factor, cameraModel.get_parameters(), measurementSets, poses);
-  } else if (input.length()) {
-      ifstream fin(input.c_str());
-      loadData(fin, imageSize, factor, cameraModel.get_parameters(), measurementSets, poses);
-  }*/
 
   size_t numMeasurements = 0;
   for (size_t i=0; i<measurementSets.size(); i++)
     numMeasurements += measurementSets[i].im.size();
 
   cout.precision(10);
+    cerr.precision(10);
   size_t numPoints = 0;
   for (size_t i=0; i<measurementSets.size(); i++)
     {
       numPoints += measurementSets[i].im.size();
     }
-  cout << measurementSets.size() << " sets of measurements, " << numPoints << " total points" << endl;
+    cerr << measurementSets.size() << " sets of measurements, " << numPoints << " total points" << endl;
 
   double minError = getReprojectionError(measurementSets, poses, cameraModel);
-  cout << sqrt(minError/numPoints) / factor << " initial reprojection error" << endl;
+    cerr << sqrt(minError/numPoints) / factor << " initial reprojection error" << endl;
 
+    cerr << "Optimizing with Levenberg-Marquardt..." << endl;
   double lambda = 1;
-  while (lambda < 1e8)
+    while (lambda < 1e12)
     {
       Vector<6> params = cameraModel.get_parameters();
       vector<SE3> oldposes = poses;
@@ -1172,24 +1028,24 @@
         {
           minError = error;
           lambda *= 0.5;
-          cout << sqrt(minError/numPoints) / factor << endl;
+           cerr << "rms reprojection error = " << sqrt(minError/numPoints) / factor << " pixels" << endl;
         }
       else
         {
           poses = oldposes;
           cameraModel.get_parameters() = params;
-          lambda *= 3;
+           lambda *= 10;
         }
-      Vector<6> v = cameraModel.get_parameters();
-      v.slice<0,4>() /= factor;
-      cout << v << endl;
+       //Vector<6> v = cameraModel.get_parameters();
+       //v.slice<0,4>() /= factor;
+       //cerr << v << endl;
     }
 
   vector<pair<Vector<2>, Vector<2> > > errors;
   pair<double,double> rperr = getReprojectionError(measurementSets, poses, cameraModel, errors);
 
   if(generateErrorImage){
-    cout << "Generating errors.bmp..." << endl;
+       cerr << "Generating errors.pgm..." << endl;
     Image<float> errorImage(imageSize);
     zeroPixels(errorImage.data(), errorImage.totalsize());
     for (size_t i=0; i<errors.size(); i++)
@@ -1198,10 +1054,10 @@
         double mag = sqrt(errors[i].second * errors[i].second / rperr.second);
         errorImage[p] = mag;
         }
-    img_save(errorImage, "errors.bmp");
+       img_save(errorImage, "errors.pgm");
   }
 
-  cout << "Estimating uncertainty..." << endl;
+    cerr << "Estimating uncertainty..." << endl;
   Matrix<6> Cov;
   getUncertainty(measurementSets, poses, cameraModel, Cov);
   Cov.slice<4,4,2,2>() *= factor*factor;



