27#include <opencv2/core/utility.hpp>
28#include <opencv2/imgcodecs.hpp>
29#include <opencv2/features2d/features2d.hpp>
30#include <opencv2/calib3d.hpp>
31#include <opencv2/imgproc.hpp>
32#include <opencv2/xfeatures2d/nonfree.hpp>
// --- Fragment of ObjectMatcher initialization (enclosing function header elided in this listing) ---
// Create the SURF keypoint detector/descriptor extractor; the "hessian" module parameter
// supplies the Hessian threshold passed to SURF::create():
42 itsFeatureDetector = cv::xfeatures2d::SURF::create(hessian::get());
// Allocate one brute-force matcher per worker core (capped at 4) so that each parallel
// matching task in matchcore() can use its own matcher without locking.
// NORM_L2 is the appropriate distance for SURF's floating-point descriptors.
43 size_t const ncores = std::min(4U, std::thread::hardware_concurrency());
44 for (
size_t i = 0; i < ncores; ++i)
45 itsMatcher.push_back(cv::Ptr<cv::DescriptorMatcher>(
new cv::BFMatcher(cv::NORM_L2)));
// --- Fragment: scan the training directory and load one training image per file ---
// Resolve the configured training directory ("traindir" parameter) to an absolute path:
52 std::string
const dirname =
absolutePath(traindir::get());
54 LINFO(
"Training from " << dirname);
// Enumerate directory entries with POSIX opendir()/readdir():
56 DIR * dir = opendir(dirname.c_str());
57 if (dir ==
nullptr)
// NOTE(review): "Cound" is a typo for "Could" in this fatal message — fix upstream.
PLFATAL(
"Cound not scan directory " << dirname);
// 'ent' is declared in elided lines; loop until readdir() exhausts the directory:
60 while ((ent = readdir(dir)) !=
nullptr)
62 std::string
const fname = ent->d_name;
63 std::string
const fullfname = dirname +
"/" + fname;
// Skip hidden entries (".", "..", dotfiles):
64 if (fname[0] ==
'.')
continue;
// 'st' (struct stat) is declared in elided lines; skip entries we cannot stat,
// and skip subdirectories — only regular files are treated as training images:
66 if (stat(fullfname.c_str(), &st) == -1)
continue;
67 if ((st.st_mode & S_IFDIR) != 0)
continue;
// Load the training image as greyscale into the TrainData record 'td'
// (td's declaration and the keypoint/descriptor computation are in elided lines):
73 td.
image = cv::imread(fullfname, cv::IMREAD_GRAYSCALE);
83 LINFO(
"Training complete for " << itsTrainData.size() <<
" images.");
// --- Fragment of process(img, trainidx, corners): full pipeline with bounding-box output ---
// Detect keypoints, compute their descriptors, then match against the training set;
// returns the best-match distance and fills trainidx + corners via match():
89 std::vector<cv::KeyPoint> keypoints;
90 this->
detect(img, keypoints);
// 'descriptors' is declared in elided lines between detect and compute:
93 this->
compute(img, keypoints, descriptors);
95 return this->
match(keypoints, descriptors, trainidx, corners);
// --- Fragment of process(img, trainidx): same pipeline, without corner/bounding-box output ---
101 std::vector<cv::KeyPoint> keypoints;
102 this->
detect(img, keypoints);
// 'descriptors' is declared in elided lines between detect and compute:
105 this->
compute(img, keypoints, descriptors);
107 return this->
match(keypoints, descriptors, trainidx);
// Body of detect(): delegate keypoint detection to the SURF detector created at init:
113 itsFeatureDetector->detect(img, keypoints);
// Body of compute(): compute SURF descriptors for the previously detected keypoints:
119 itsFeatureDetector->compute(img, keypoints, descriptors);
// --- Fragment of match(keypoints, descriptors, trainidx, corners): parallel match with homography corners ---
124 size_t & trainidx, std::vector<cv::Point2f> & corners)
126 if (itsTrainData.empty())
LFATAL(
"No training data loaded");
// Split the ntrain training images across up to 4 worker cores. The first r cores
// get q+1 images each and the remaining cores get q, so all ntrain are covered:
129 size_t const ncores = std::min(4U, std::thread::hardware_concurrency());
130 size_t const ntrain = itsTrainData.size();
131 size_t const r = ntrain % ncores;
132 size_t const q = (ntrain - r) / ncores;
133 size_t percore = q + 1;
134 std::vector<std::future<ObjectMatcher::MatchData> > fut;
137 for (
size_t i = 0; i < ncores; ++i)
139 if (i == r) percore = q;
140 if (percore == 0)
break;
// 'startidx' is declared/advanced in elided lines; each task covers [startidx, endidx):
142 size_t const endidx = std::min(startidx + percore, ntrain);
// Launch one async task per core. The lambda captures by reference, which is safe
// because the futures are all get() below before this function returns; the per-task
// core index and range are passed by value as async() arguments. The trailing 'true'
// asks matchcore() to also compute homography corners:
143 fut.push_back(
jevois::async([&](
size_t cn,
size_t mi,
size_t ma)
144 {
return this->matchcore(cn, keypoints, descriptors, mi, ma,
true); },
145 i, startidx, endidx));
// Reduce: keep the per-core result with the smallest average match distance
// (the loop over futures 'f' is in elided lines):
151 double bestdist = 1.0e30;
155 MatchData md = f.get();
156 if (md.avgdist < bestdist) { bestdist = md.avgdist; trainidx = md.trainidx; corners = md.corners; }
// --- Fragment of match(keypoints, descriptors, trainidx): same parallel scheme, no corners ---
167 if (itsTrainData.empty())
LFATAL(
"No training data loaded");
// Same work split as the corners overload: first r cores get q+1 images, the rest get q:
170 size_t const ncores = std::min(4U, std::thread::hardware_concurrency());
171 size_t const ntrain = itsTrainData.size();
172 size_t const r = ntrain % ncores;
173 size_t const q = (ntrain - r) / ncores;
174 size_t percore = q + 1;
175 std::vector<std::future<ObjectMatcher::MatchData> > fut;
178 for (
size_t i = 0; i < ncores; ++i)
180 if (i == r) percore = q;
181 if (percore == 0)
break;
// 'startidx' is declared/advanced in elided lines; each task covers [startidx, endidx):
183 size_t const endidx = std::min(startidx + percore, ntrain);
// Trailing 'false' tells matchcore() to skip the homography-corner computation:
184 fut.push_back(
jevois::async([&](
size_t cn,
size_t mi,
size_t ma)
185 {
return this->matchcore(cn, keypoints, descriptors, mi, ma,
false); },
186 i, startidx, endidx));
// Reduce over the futures (loop over 'f' is in elided lines): keep smallest avg distance:
192 double bestdist = 1.0e30;
196 MatchData md = f.get();
197 if (md.avgdist < bestdist) { bestdist = md.avgdist; trainidx = md.trainidx; }
// --- matchcore(): per-core worker — match query descriptors against training images [minidx, maxidx) ---
// Returns a MatchData {avgdist, trainidx, corners} for the best training image in its range.
// (The 6th parameter and some interior lines are elided in this listing.)
205ObjectMatcher::MatchData ObjectMatcher::matchcore(
size_t corenum, std::vector<cv::KeyPoint>
const & keypoints,
206 cv::Mat
const & descriptors,
size_t minidx,
size_t maxidx,
// Sentinel "no match yet" distance; mdata starts with this worst-case avgdist:
210 double bestsofar = 1.0e30;
211 MatchData mdata { bestsofar, 0, { } };
213 for (
size_t idx = minidx; idx < maxidx; ++idx)
215 TrainData
const & td = itsTrainData[idx];
// Brute-force match query descriptors against this training image, using this
// core's dedicated matcher (no locking needed):
217 std::vector<cv::DMatch> matches;
218 itsMatcher[corenum]->match(descriptors, td.descriptors, matches);
219 if (matches.empty())
continue;
// cv::DMatch::operator< orders by distance, so front() is the best match:
222 std::sort(matches.begin(), matches.end());
223 std::vector<cv::DMatch> good_matches;
224 double const minDist = matches.front().distance, maxDist = matches.back().distance;
// Keep at most goodpts.max() matches, stopping early once distance exceeds the
// "distthresh" parameter (matches are sorted, so everything after also fails):
227 size_t const ptsPairs = std::min(goodpts::get().max(), matches.size());
228 double const dthresh = distthresh::get();
229 for (
size_t i = 0; i < ptsPairs; ++i)
230 if (matches[i].distance <= dthresh) good_matches.push_back(matches[i]);
else break;
232 LDEBUG(td.name <<
": Match distances: " << minDist <<
" .. " << maxDist <<
", " <<
233 matches.size() <<
" matches, " << good_matches.size() <<
" good ones.");
// Require a minimum number of good matches (goodpts.min()) to accept this image:
236 if (good_matches.size() < goodpts::get().min())
continue;
// Score = average distance over the good matches; skip if not better than best so far.
// NOTE(review): the visible lines never update 'bestsofar' inside the loop (only
// mdata.avgdist is updated) — confirm it is updated in the elided lines, otherwise
// this pruning test compares against the initial 1.0e30 only.
239 double avgdist = 0.0;
240 for (cv::DMatch
const & gm : good_matches) avgdist += gm.distance;
241 avgdist /= good_matches.size();
242 if (avgdist >= bestsofar)
continue;
244 LDEBUG(
"Object match: found " << td.name <<
" distance " << avgdist);
// Build corresponding point sets: training-image points (trainIdx) vs query points
// (queryIdx), for homography estimation:
247 std::vector<cv::Point2f> obj, scene;
248 for (
size_t i = 0; i < good_matches.size(); ++i)
250 obj.push_back(td.keypoints[good_matches[i].trainIdx].pt);
251 scene.push_back(keypoints[good_matches[i].queryIdx].pt);
// The four corners of the training image, to be projected into the query image:
257 std::vector<cv::Point2f> obj_corners(4);
258 obj_corners[0] = cv::Point(0, 0);
259 obj_corners[1] = cv::Point(td.image.cols, 0);
260 obj_corners[2] = cv::Point(td.image.cols, td.image.rows);
261 obj_corners[3] = cv::Point(0, td.image.rows);
// Robustly estimate the training->query homography with RANSAC (5.0 px reproj thresh):
267 cv::Mat
H = cv::findHomography(obj, scene, cv::RANSAC, 5.0);
// Sanity-check H via its singular values: reject degenerate homographies where the
// smallest sv is near zero or the condition number (largest/smallest) is >= 100:
270 std::vector<double> sv; cv::SVD::compute(H, sv, cv::SVD::NO_UV);
271 LDEBUG(
"Homography sv " << sv.front() <<
" ... " << sv.back());
273 if (sv.empty() ==
false && sv.back() >= 0.001 && sv.front() / sv.back() >= 100.0)
// Well-conditioned H: project the training-image corners into the query image:
276 cv::perspectiveTransform(obj_corners, mdata.corners, H);
279 mdata.trainidx = idx;
280 mdata.avgdist = avgdist;
// Branch at original lines 287-288 (condition elided): record the match without
// corners — presumably the no-corners path selected by the elided 6th parameter:
287 mdata.trainidx = idx;
288 mdata.avgdist = avgdist;
// --- Body of traindata(idx): bounds-checked accessor for a training record ---
299 if (idx >= itsTrainData.size())
LFATAL(
"Index too large");
300 return itsTrainData[idx];
// Body of numtrain(): number of loaded training images.
305{
return itsTrainData.size(); }
size_t numtrain() const
Get number of training images.
double process(cv::Mat const &img, size_t &trainidx, std::vector< cv::Point2f > &corners)
Process a greyscale image, returns match score, object index, and bounding box corners for best match...
~ObjectMatcher()
Destructor.
TrainData const & traindata(size_t idx) const
Get the training data for a given index.
void postInit() override
Load training images and compute keypoints and descriptors.
void compute(cv::Mat const &img, std::vector< cv::KeyPoint > &keypoints, cv::Mat &descriptors)
Compute descriptors for given keypoints.
double match(std::vector< cv::KeyPoint > const &keypoints, cv::Mat const &descriptors, size_t &trainidx, std::vector< cv::Point2f > &corners)
Match given descriptors against those of our training images, return best match distance.
void detect(cv::Mat const &img, std::vector< cv::KeyPoint > &keypoints)
Detect keypoints.
std::filesystem::path absolutePath(std::filesystem::path const &path="")
std::future< std::invoke_result_t< std::decay_t< Function >, std::decay_t< Args >... > > async(Function &&f, Args &&... args)
Training data structure for ObjectMatcher.
std::vector< cv::KeyPoint > keypoints