ObjectMatcher.C
// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
// California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
//
// This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
// redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
// Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
// License for more details. You should have received a copy of the GNU General Public License along with this program;
// if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
// Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*! \file */

#include <jevoisbase/Components/ObjectDetection/ObjectMatcher.H>
#include <jevois/Debug/Log.H>
#include <jevois/Debug/Profiler.H>
#include <sys/types.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/stat.h>
#include <unistd.h>

#include <opencv2/core/utility.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>

// ####################################################################################################
ObjectMatcher::~ObjectMatcher()
{ }

// ####################################################################################################
void ObjectMatcher::postInit()
{
  // Initialize our feature computer and matcher:
  itsFeatureDetector = cv::xfeatures2d::SURF::create(hessian::get());
  size_t const ncores = std::min(4U, std::thread::hardware_concurrency());
  for (size_t i = 0; i < ncores; ++i)
    itsMatcher.push_back(cv::Ptr<cv::DescriptorMatcher>(new cv::BFMatcher(cv::NORM_L2)));

  //itsFeatureDetector = cv::ORB::create();
  //itsMatcher.reset(new cv::BFMatcher(cv::NORM_HAMMING));

  // Load training images and compute keypoints and descriptors:
  itsTrainData.clear();
  std::string const dirname = absolutePath(traindir::get());

  LINFO("Training from " << dirname);

  DIR * dir = opendir(dirname.c_str());
  if (dir == nullptr) PLFATAL("Could not scan directory " << dirname);

  struct dirent * ent;
  while ((ent = readdir(dir)) != nullptr)
  {
    std::string const fname = ent->d_name;
    std::string const fullfname = dirname + "/" + fname;
    if (fname[0] == '.') continue; // skip files that start with a period
    struct stat st;
    if (stat(fullfname.c_str(), &st) == -1) continue; // ignore any stat error
    if ((st.st_mode & S_IFDIR) != 0) continue; // skip sub-directories

    // Create a training data entry:
    itsTrainData.push_back(TrainData());
    TrainData & td = itsTrainData.back();
    td.name = fname;
    td.image = cv::imread(fullfname, cv::IMREAD_GRAYSCALE);

    // Compute keypoints and descriptors:
    itsFeatureDetector->detectAndCompute(td.image, cv::Mat(), td.keypoints, td.descriptors);

    LDEBUG(fname << ": " << td.keypoints.size() << " keypoints.");
  }

  closedir(dir);

  LINFO("Training complete for " << itsTrainData.size() << " images.");
}

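// For illustration: postInit() above turns every regular file found in the traindir directory into one training
// entry, using the file name as the object name. With a hypothetical directory containing just logo.png and
// stopsign.jpg, itsTrainData would hold two entries named "logo.png" and "stopsign.jpg", each carrying the
// grayscale image plus its precomputed SURF keypoints and descriptors.
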
// ####################################################################################################
double ObjectMatcher::process(cv::Mat const & img, size_t & trainidx, std::vector<cv::Point2f> & corners)
{
  std::vector<cv::KeyPoint> keypoints;
  this->detect(img, keypoints);

  cv::Mat descriptors;
  this->compute(img, keypoints, descriptors);

  return this->match(keypoints, descriptors, trainidx, corners);
}

// ####################################################################################################
double ObjectMatcher::process(cv::Mat const & img, size_t & trainidx)
{
  std::vector<cv::KeyPoint> keypoints;
  this->detect(img, keypoints);

  cv::Mat descriptors;
  this->compute(img, keypoints, descriptors);

  return this->match(keypoints, descriptors, trainidx);
}

// ####################################################################################################
void ObjectMatcher::detect(cv::Mat const & img, std::vector<cv::KeyPoint> & keypoints)
{
  itsFeatureDetector->detect(img, keypoints);
}

// ####################################################################################################
void ObjectMatcher::compute(cv::Mat const & img, std::vector<cv::KeyPoint> & keypoints, cv::Mat & descriptors)
{
  itsFeatureDetector->compute(img, keypoints, descriptors);
}

// ####################################################################################################
double ObjectMatcher::match(std::vector<cv::KeyPoint> const & keypoints, cv::Mat const & descriptors,
                            size_t & trainidx, std::vector<cv::Point2f> & corners)
{
  if (itsTrainData.empty()) LFATAL("No training data loaded");

  // Parallelize the matching over our cores:
  size_t const ncores = std::min(4U, std::thread::hardware_concurrency());
  size_t const ntrain = itsTrainData.size();
  size_t const r = ntrain % ncores;
  size_t const q = (ntrain - r) / ncores;
  size_t percore = q + 1; // up to r cores will process q+1 elements, and up to ncores-r will process q
  std::vector<std::future<ObjectMatcher::MatchData> > fut;

  size_t startidx = 0;
  for (size_t i = 0; i < ncores; ++i)
  {
    if (i == r) percore = q;
    if (percore == 0) break;

    size_t const endidx = std::min(startidx + percore, ntrain);
    fut.push_back(jevois::async([&](size_t cn, size_t mi, size_t ma)
                                { return this->matchcore(cn, keypoints, descriptors, mi, ma, true); },
                                i, startidx, endidx));
    startidx += percore;
  }

  // Wait for all jobs to complete, ignore any exception (which may rarely occur, e.g., due to an ill-conditioned
  // homography), and pick the best result:
  double bestdist = 1.0e30;
  for (auto & f : fut)
    try
    {
      MatchData md = f.get();
      if (md.avgdist < bestdist) { bestdist = md.avgdist; trainidx = md.trainidx; corners = md.corners; }
    }
    catch (...) { }

  return bestdist;
}

// ####################################################################################################
double ObjectMatcher::match(std::vector<cv::KeyPoint> const & keypoints, cv::Mat const & descriptors,
                            size_t & trainidx)
{
  if (itsTrainData.empty()) LFATAL("No training data loaded");

  // Parallelize the matching over our cores:
  size_t const ncores = std::min(4U, std::thread::hardware_concurrency());
  size_t const ntrain = itsTrainData.size();
  size_t const r = ntrain % ncores;
  size_t const q = (ntrain - r) / ncores;
  size_t percore = q + 1; // up to r cores will process q+1 elements, and up to ncores-r will process q
  std::vector<std::future<ObjectMatcher::MatchData> > fut;

  size_t startidx = 0;
  for (size_t i = 0; i < ncores; ++i)
  {
    if (i == r) percore = q;
    if (percore == 0) break;

    size_t const endidx = std::min(startidx + percore, ntrain);
    fut.push_back(jevois::async([&](size_t cn, size_t mi, size_t ma)
                                { return this->matchcore(cn, keypoints, descriptors, mi, ma, false); },
                                i, startidx, endidx));
    startidx += percore;
  }

  // Wait for all jobs to complete, ignore any exception (which may rarely occur, e.g., due to an ill-conditioned
  // homography), and pick the best result:
  double bestdist = 1.0e30;
  for (auto & f : fut)
    try
    {
      MatchData md = f.get();
      if (md.avgdist < bestdist) { bestdist = md.avgdist; trainidx = md.trainidx; }
    }
    catch (...) { }

  return bestdist;
}

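// Worked example of the core split used by both match() variants above (illustrative numbers): with ntrain = 10
// training images and ncores = 4, r = 10 % 4 = 2 and q = (10 - 2) / 4 = 2. The first r = 2 async jobs each match
// against q + 1 = 3 images (indices 0-2 and 3-5), and the remaining 2 jobs match against q = 2 images each
// (indices 6-7 and 8-9), so every training image is visited exactly once.
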
// ####################################################################################################
ObjectMatcher::MatchData ObjectMatcher::matchcore(size_t corenum, std::vector<cv::KeyPoint> const & keypoints,
                                                  cv::Mat const & descriptors, size_t minidx, size_t maxidx,
                                                  bool do_corners)
{
  // Compute matches between query and training images:
  double bestsofar = 1.0e30;
  MatchData mdata { bestsofar, 0, { } };

  for (size_t idx = minidx; idx < maxidx; ++idx)
  {
    TrainData const & td = itsTrainData[idx];

    std::vector<cv::DMatch> matches;
    itsMatcher[corenum]->match(descriptors, td.descriptors, matches);
    if (matches.empty()) continue;

    // Sort the matches by distance:
    std::sort(matches.begin(), matches.end());
    std::vector<cv::DMatch> good_matches;
    double const minDist = matches.front().distance, maxDist = matches.back().distance;

    // Keep only the good matches:
    size_t const ptsPairs = std::min(goodpts::get().max(), matches.size());
    double const dthresh = distthresh::get();
    for (size_t i = 0; i < ptsPairs; ++i)
      if (matches[i].distance <= dthresh) good_matches.push_back(matches[i]); else break;

    LDEBUG(td.name << ": Match distances: " << minDist << " .. " << maxDist << ", " <<
           matches.size() << " matches, " << good_matches.size() << " good ones.");

    // Abort here if we did not get enough good matches:
    if (good_matches.size() < goodpts::get().min()) continue;

    // Compute the average match distance:
    double avgdist = 0.0;
    for (cv::DMatch const & gm : good_matches) avgdist += gm.distance;
    avgdist /= good_matches.size();
    if (avgdist >= bestsofar) continue;

    LDEBUG("Object match: found " << td.name << " distance " << avgdist);

    // Localize the object:
    std::vector<cv::Point2f> obj, scene;
    for (size_t i = 0; i < good_matches.size(); ++i)
    {
      obj.push_back(td.keypoints[good_matches[i].trainIdx].pt);
      scene.push_back(keypoints[good_matches[i].queryIdx].pt);
    }

    if (do_corners)
    {
      // Get the corners from the training image:
      std::vector<cv::Point2f> obj_corners(4);
      obj_corners[0] = cv::Point(0, 0);
      obj_corners[1] = cv::Point(td.image.cols, 0);
      obj_corners[2] = cv::Point(td.image.cols, td.image.rows);
      obj_corners[3] = cv::Point(0, td.image.rows);

      // Project the corners into the scene image. This may throw if the homography is too messed up:
      try
      {
        // Compute the homography:
        cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC, 5.0);

        // Check that the homography is not garbage:
        std::vector<double> sv; cv::SVD::compute(H, sv, cv::SVD::NO_UV);
        LDEBUG("Homography sv " << sv.front() << " ... " << sv.back());

        if (sv.empty() == false && sv.back() >= 0.001 && sv.front() / sv.back() >= 100.0)
        {
          // The homography looks good, use it to map the object corners to the scene image:
          cv::perspectiveTransform(obj_corners, mdata.corners, H);

          // If all went well, this is our current best object match:
          mdata.trainidx = idx;
          mdata.avgdist = avgdist;
          bestsofar = avgdist;
        }
      } catch (...) { }
    }
    else
    {
      mdata.trainidx = idx;
      mdata.avgdist = avgdist;
      bestsofar = avgdist;
    }
  }

  return mdata;
}

// ####################################################################################################
ObjectMatcher::TrainData const & ObjectMatcher::traindata(size_t idx) const
{
  if (idx >= itsTrainData.size()) LFATAL("Index too large");
  return itsTrainData[idx];
}

// ####################################################################################################
size_t ObjectMatcher::numtrain() const
{ return itsTrainData.size(); }

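As a usage illustration, the sketch below shows how a caller might drive the public interface defined above on one grayscale camera frame. It is a hypothetical example, not code from this file: the reportBestMatch() helper and the 100.0 acceptance threshold are made up for illustration, and the ObjectMatcher instance is assumed to be a fully initialized jevois sub-component whose postInit() has already loaded the training images.

// Minimal sketch (assumptions: matcher is an initialized ObjectMatcher sub-component with training data loaded,
// grayframe is a grayscale camera image, and 100.0 is an arbitrary illustrative acceptance threshold):
void reportBestMatch(ObjectMatcher & matcher, cv::Mat const & grayframe)
{
  size_t trainidx = 0;
  std::vector<cv::Point2f> corners;

  // Detect SURF keypoints, compute descriptors, and match against all training images in parallel:
  double const dist = matcher.process(grayframe, trainidx, corners);

  // A smaller average descriptor distance means a better match; corners receives the four training-image
  // corners projected into the scene only when a usable homography was found:
  if (dist < 100.0 && corners.size() == 4)
    LINFO("Matched " << matcher.traindata(trainidx).name << " (avg distance " << dist << ')');
}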