JeVoisBase  1.22
JeVois Smart Embedded Machine Vision Toolkit Base Modules
FirstVision.C
1// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2//
3// JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
4// California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
5//
6// This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
7// redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
8// Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10// License for more details. You should have received a copy of the GNU General Public License along with this program;
11// if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12//
13// Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
14// Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
15// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
16/*! \file */
17
18#include <jevois/Core/Module.H>
19#include <jevois/Core/Engine.H>
20#include <jevois/Image/RawImageOps.H>
21#include <jevois/Debug/Log.H>
22#include <jevois/Util/Utils.H>
23#include <jevois/Util/Async.H>
24#include <jevois/Debug/Timer.H>
25#include <jevoisbase/Components/Tracking/Kalman1D.H>
26
27#include <linux/videodev2.h>
28
29#include <opencv2/core/core.hpp>
30#include <opencv2/imgproc/imgproc.hpp>
31#include <opencv2/calib3d/calib3d.hpp>
32
33#include <Eigen/Geometry> // for AngleAxis and Quaternion
34
35// REMINDER: make sure you understand the viral nature and terms of the above license. If you are writing code derived
36// from this file, you must offer your source under the GPL license too.
37
38static jevois::ParameterCategory const ParamCateg("FirstVision Options");
39
40//! Parameter \relates FirstVision
41JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(hcue, unsigned char, "Initial cue for target hue (0=red/do not use because of "
42 "wraparound, 30=yellow, 45=light green, 60=green, 75=green cyan, 90=cyan, "
43 "105=light blue, 120=blue, 135=purple, 150=pink)",
44 45, jevois::Range<unsigned char>(0, 179), ParamCateg);
45
46//! Parameter \relates FirstVision
47JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(scue, unsigned char, "Initial cue for target saturation lower bound",
48 50, ParamCateg);
49
50//! Parameter \relates FirstVision
51JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(vcue, unsigned char, "Initial cue for target value (brightness) lower bound",
52 200, ParamCateg);
53
54//! Parameter \relates FirstVision
55JEVOIS_DECLARE_PARAMETER(maxnumobj, size_t, "Max number of objects to declare a clean image. If more blobs are "
56 "detected in a frame, we skip that frame before we even try to analyze shapes of the blobs",
57 100, ParamCateg);
58
59//! Parameter \relates FirstVision
60JEVOIS_DECLARE_PARAMETER(hullarea, jevois::Range<unsigned int>, "Range of object area (in pixels) to track. Use this "
61 "if you want to skip shape analysis of very large or very small blobs",
62 jevois::Range<unsigned int>(20*20, 300*300), ParamCateg);
63
64//! Parameter \relates FirstVision
65JEVOIS_DECLARE_PARAMETER(hullfill, int, "Max fill ratio of the convex hull (percent). Lower values mean your shape "
66 "occupies a smaller fraction of its convex hull. This parameter sets an upper bound, "
67 "fuller shapes will be rejected.",
68 50, jevois::Range<int>(1, 100), ParamCateg);
69
70//! Parameter \relates FirstVision
71JEVOIS_DECLARE_PARAMETER(erodesize, size_t, "Erosion structuring element size (pixels), or 0 for no erosion",
72 2, ParamCateg);
73
74//! Parameter \relates FirstVision
75JEVOIS_DECLARE_PARAMETER(dilatesize, size_t, "Dilation structuring element size (pixels), or 0 for no dilation",
76 4, ParamCateg);
77
78//! Parameter \relates FirstVision
79JEVOIS_DECLARE_PARAMETER(epsilon, double, "Shape smoothing factor (higher for smoother). Shape smoothing is applied "
80 "to remove small contour defects before the shape is analyzed.",
81 0.015, jevois::Range<double>(0.001, 0.999), ParamCateg);
82
83//! Parameter \relates FirstVision
84JEVOIS_DECLARE_PARAMETER(debug, bool, "Show contours of all object candidates if true",
85 false, ParamCateg);
86
87//! Parameter \relates FirstVision
88JEVOIS_DECLARE_PARAMETER(threads, size_t, "Number of parallel vision processing threads. Thread 0 uses the HSV values "
89 "provided by user parameters; thread 1 broadens that fixed range a bit; threads 2-3 use a "
90 "narrow and broader learned HSV window over time",
91 4, jevois::Range<size_t>(2, 4), ParamCateg);
92
93//! Parameter \relates FirstVision
94JEVOIS_DECLARE_PARAMETER(showthread, size_t, "Thread number that is used to display HSV-thresholded image",
95 0, jevois::Range<size_t>(0, 3), ParamCateg);
96
97//! Parameter \relates FirstVision
98JEVOIS_DECLARE_PARAMETER(ethresh, double, "Shape error threshold (lower is stricter for exact shape)",
99 900.0, jevois::Range<double>(0.01, 1000.0), ParamCateg);
100
101//! Parameter \relates FirstVision
102JEVOIS_DECLARE_PARAMETER(dopose, bool, "Compute (and show) 6D object pose; requires a valid camera calibration. "
103 "When dopose is true, 3D serial messages are sent out, otherwise 2D serial messages.",
104 true, ParamCateg);
105
106//! Parameter \relates FirstVision
107JEVOIS_DECLARE_PARAMETER(iou, double, "Intersection-over-union ratio over which duplicates are eliminated",
108 0.3, jevois::Range<double>(0.01, 0.99), ParamCateg);
109
110//! Parameter \relates FirstVision
111JEVOIS_DECLARE_PARAMETER(objsize, cv::Size_<float>, "Object size (in meters)",
112 cv::Size_<float>(0.28F, 0.175F), ParamCateg);
113
114//! Parameter \relates FirstVision
115JEVOIS_DECLARE_PARAMETER(margin, size_t, "Margin from frame borders (pixels). If any corner of a detected shape "
116 "gets closer than the margin to the frame borders, the shape will be rejected. This is to "
117 "avoid possibly bogus 6D pose estimation when the shape starts getting truncated as it "
118 "partially exits the camera's field of view.",
119 5, ParamCateg);
120
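// All parameters above can be inspected and changed at runtime over the JeVois command-line interface
// (hardware serial or serial-over-USB); `help` lists all parameters of the loaded module. A few
// illustrative commands (values are examples only):
//
//   setpar hcue 30       # look for yellow instead of light green
//   setpar hullfill 40   # require shapes that fill less of their convex hull
//   setpar debug true    # draw contours of all candidate blobs
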
121//! Simple color-based detection of a U-shaped object for FIRST Robotics
122/*! This module isolates pixels within a given HSV range (hue, saturation, and value of color pixels), does some
123 cleanups, and extracts object contours. It looks for a rectangular U shape of a specific size (set by parameter
124 \p objsize). See the screenshots for an example of the shape. It sends information about detected objects over serial.
125
126 This module usually works best with the camera sensor set to manual exposure, manual gain, manual color balance,
127 etc., so that HSV color values are reliable. See the \b script.cfg file in this module's directory for an example of
128 how to set the camera settings each time this module is loaded.
129
130 This code was loosely inspired by the JeVois \jvmod{ObjectTracker} module. Also see \jvmod{FirstPython} for a
131 simplified version of this module, written in Python.
132
133 This module is provided for inspiration. It has no pretension of actually solving the FIRST Robotics vision problem
134 in a complete and reliable way. It is released in the hope that FRC teams will try it out and get inspired to
135 develop something much better for their own robot.
136
137 General pipeline
138 ----------------
139
140 The basic idea of this module is the classic FIRST robotics vision pipeline: first, select a range of pixels in HSV
141 color space likely to include the object. Then, detect contours of all blobs in range. Then apply some tests on the
142 shape of the detected blobs, their size, fill ratio (ratio of object area to its convex hull's area), etc. Finally,
143 estimate the location and pose of the object in the world.
144
145 In this module, we run up to 4 pipelines in parallel, using different settings for the range of HSV pixels
146 considered:
147
148 - Pipeline 0 uses the HSV values provided by user parameters;
149 - Pipeline 1 broadens that fixed range a bit;
150 - Pipelines 2-3 use a narrow and broader learned HSV window over time.
151
152 Detections from all 4 pipelines are considered for overlap and quality (raggedness of their outlines), and only the
153 cleanest of several overlapping detections is preserved. From those cleanest detections, pipelines 2-3 learn and
154 adapt the HSV range for future video frames.
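
    As a rough sketch, the core of each pipeline as implemented in detect() below boils down to the
    following (simplified; the real code also does morphological cleanup, shape tests, and display):

    \code
    cv::Mat imgth;                                  // binary mask of pixels within the HSV window
    cv::inRange(imghsv, hsvmin, hsvmax, imgth);     // hsvmin/hsvmax come from this pipeline's hsvcue
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(imgth, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    // each contour is then tested for hull shape, area, fill ratio, etc.
    \endcode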
155
156 Using this module
157 -----------------
158
159 Check out [this tutorial](http://jevois.org/tutorials/UserFirstVision.html).
160
161 Detection and quality control steps
162 -----------------------------------
163
164 The following messages appear for each of the 4 pipelines, at the bottom of the demo video, to help users figure out
165 why their object may not be detected:
166
167 - T0 to T3: thread (pipeline) number
168 - H=..., S=..., V=...: HSV range considered by that thread
169 - N=...: number of raw blobs detected in that range
170 - Because N blobs may be considered in each thread from this point on, only the blob that progressed the farthest
171 through the series of tests below is reported. One letter is added each time a test is passed:
172 + H: the convex hull of the blob is quadrilateral (4 vertices)
173 + A: hull area is within range specified by parameter \p hullarea
174 + F: object to hull fill ratio is below the limit set by parameter \p hullfill (i.e., object is not a solid,
175 filled quadrilateral shape)
176 + S: the object has 8 vertices after shape smoothing to eliminate small shape defects (a U shape is
177 indeed expected to have 8 vertices).
178 + E: the shape discrepancy between the original shape and the smoothed shape is acceptable per parameter
179 \p ethresh, i.e., the original contour did not have a lot of defects.
180 + M: the shape is not too close to the borders of the image, per parameter \p margin, i.e., it is unlikely to
181 be truncated as the object partially exits the camera's field of view.
182 + V: vectors describing the shape relative to its convex hull are non-zero, i.e., the centroid of the shape
183 is not exactly coincident with the centroid of its convex hull, as we would expect for a U shape.
184 + U: the shape is roughly upright; upside-down U shapes are rejected as likely spurious.
185 + OK: this thread detected at least one shape that passed all the tests.
186
187 The black-and-white picture at right shows the pixels within the HSV range of the thread selected by parameter
188 \p showthread (0 by default).
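
    For example, given the format strings used in detect() below, a thread that found at least one
    shape passing all the tests would show a status line like this (values illustrative):

    \verbatim
    T2: H=045-075 S=030-255 V=180-255 N=004 HAFSEMVU OK
    \endverbatim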
189
190 Serial Messages
191 ---------------
192
193 This module can send standardized serial messages as described in \ref UserSerialStyle. One message is issued on
194 every video frame for each detected and good object. The \p id field is simply \b FIRST in all of the
195 messages.
196
197 When \p dopose is turned on, 3D messages will be sent, otherwise 2D messages.
198
199 2D messages when \p dopose is off:
200
201 - Serial message type: \b 2D
202 - `id`: always `FIRST`
203 - `x`, `y`, or vertices: standardized 2D coordinates of object center or corners
204 - `w`, `h`: standardized marker size
205 - `extra`: none (empty string)
206
207 3D messages when \p dopose is on:
208
209 - Serial message type: \b 3D
210 - `id`: always `FIRST`
211 - `x`, `y`, `z`, or vertices: 3D coordinates in millimeters of object center, or corners
212 - `w`, `h`, `d`: object size in millimeters; a depth of 1mm is always used
213 - `extra`: none (empty string)
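
    For illustration only (the exact field layout depends on the serial style in effect; see \ref
    UserSerialStyle for the authoritative formats), Normal-style messages could look like:

    \verbatim
    N2 FIRST -238 237 375 281
    N3 FIRST -50.0 12.3 740.2 280.0 175.0 1.0
    \endverbatim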
214
215 NOTE: 3D pose estimation from low-resolution 176x144 images at 120fps can be quite noisy. Make sure you tune your
216 HSV ranges very well if you want to operate at 120fps (see below). To operate more reliably at very low resolutions,
217 one may want to improve this module by adding subpixel shape refinement and tracking across frames.
218
219 See \ref UserSerialStyle for more on standardized serial messages, and \ref coordhelpers for more info on
220 standardized coordinates.
221
222 Trying it out
223 -------------
224
225 The default parameter settings (which are set in \b script.cfg, explained below) attempt to detect yellow-green
226 objects. Present an object to the JeVois camera and see whether it is detected. When the object is detected and
227 passes a number of quality-control tests, its outline is drawn.
228
229 For further use of this module, you may want to check out the following tutorials:
230
231 - [Using the sample FIRST Robotics vision module](http://jevois.org/tutorials/UserFirstVision.html)
232 - [Tuning the color-based object tracker using a python graphical
233 interface](http://jevois.org/tutorials/UserColorTracking.html)
234 - [Making a motorized pan-tilt head for JeVois and tracking
235 objects](http://jevois.org/tutorials/UserPanTilt.html)
236 - \ref ArduinoTutorial
237
238 Tuning
239 ------
240
241 You need to provide the exact width and height of your physical shape to parameter \p objsize for this module to
242 work. It will look for a shape of that physical size (though at any distance and orientation from the camera). Be
243 sure you edit \b script.cfg and set the parameter \p objsize in there to the true measured physical size of your
244 shape.
245
246 You should adjust parameters \p hcue, \p scue, and \p vcue to isolate the range of Hue, Saturation, and Value
247 (respectively) that correspond to the objects you want to detect. Note that there is a \b script.cfg file in this
248 module's directory that provides a range tuned to a light yellow-green object, as shown in the demo screenshot.
249
250 Tuning the parameters is best done interactively by connecting to your JeVois camera while it is looking at some
251 object of the desired color. Once you have achieved a tuning, you may want to set the hcue, scue, and vcue
252 parameters in your \b script.cfg file for this module on the microSD card (see below).
253
254 Typically, you would start by narrowing down on the hue, then the value, and finally the saturation. Make sure you
255 also move your camera around and show it typical background clutter to check for false positives (detections of
256 things in which you are not interested, which can happen if your ranges are too wide).
257
258 Config file
259 -----------
260
261 JeVois allows you to store parameter settings and commands in a file named \b script.cfg stored in the directory of
262 a module. The file \b script.cfg may contain any sequence of commands as you would type them interactively in the
263 JeVois command-line interface. For the \jvmod{FirstVision} module, a default script is provided that sets the camera
264 to manual color, gain, and exposure mode (for more reliable color values), and other example parameter values.
265
266 The \b script.cfg file for \jvmod{FirstVision} is stored on your microSD at
267 <b>JEVOIS:/modules/JeVois/FirstVision/script.cfg</b>
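
    For reference, a minimal \b script.cfg could look as follows (values and exact syntax are
    illustrative; the file shipped with the module is the authoritative example):

    \verbatim
    # Use manual white balance, gain, and exposure, for more reliable colors:
    setcam autowb 0
    setcam autogain 0
    setcam autoexp 1

    # Initial cues for a light yellow-green object measuring 0.28m x 0.175m:
    setpar hcue 45
    setpar scue 50
    setpar vcue 200
    setpar objsize 0.280 0.175
    \endverbatim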
268
269 @author Laurent Itti
270
271 @videomapping YUYV 176 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
272 @videomapping YUYV 352 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
273 @videomapping YUYV 320 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
274 @videomapping YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
275 @videomapping NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstVision
276 @videomapping NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstVision
277 @email itti\@usc.edu
278 @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
279 @copyright Copyright (C) 2017 by Laurent Itti, iLab and the University of Southern California
280 @mainurl http://jevois.org
281 @supporturl http://jevois.org/doc
282 @otherurl http://iLab.usc.edu
283 @license GPL v3
284 @distribution Unrestricted
285 @restrictions None
286 \ingroup modules */
287class FirstVision : public jevois::StdModule,
288 public jevois::Parameter<hcue, scue, vcue, maxnumobj, hullarea, hullfill, erodesize,
289 dilatesize, epsilon, debug, threads, showthread, ethresh,
290 dopose, iou, objsize, margin>
291{
292 protected:
293 jevois::CameraCalibration itsCalib; //!< Camera calibration parameters
294 bool itsCueChanged = true; //!< True when users change ranges
295
296 void onParamChange(hcue const & /*param*/, unsigned char const & /*newval*/) override { itsCueChanged = true; }
297 void onParamChange(scue const & /*param*/, unsigned char const & /*newval*/) override { itsCueChanged = true; }
298 void onParamChange(vcue const & /*param*/, unsigned char const & /*newval*/) override { itsCueChanged = true; }
299
300 // ####################################################################################################
301 //! Helper struct for an HSV range triplet, where each range is specified as a mean and sigma:
302 /*! Note that sigma is used differently for H, S, and V, under the assumption that we want to track a bright target:
303 For H, the range is [mean-sigma .. mean+sigma]. For S and V, the range is [mean-sigma .. 255]. See rmin() and
304 rmax() for details. */
305 struct hsvcue
306 {
307 //! Constructor
308 hsvcue(unsigned char h, unsigned char s, unsigned char v) : muh(h), sih(30), mus(s), sis(20), muv(v), siv(20)
309 { fix(); }
310
311 //! Constructor
312 hsvcue(unsigned char h, unsigned char hsig, unsigned char s, unsigned char ssig,
313 unsigned char v, unsigned char vsig) : muh(h), sih(hsig), mus(s), sis(ssig), muv(v), siv(vsig)
314 { fix(); }
315
316 //! Fix ranges so they don't go out of bounds
317 void fix()
318 {
319 muh = std::min(179.0F, std::max(1.0F, muh)); sih = std::max(1.0F, std::min(sih, 360.0F));
320 mus = std::min(254.0F, std::max(1.0F, mus)); sis = std::max(1.0F, std::min(sis, 512.0F));
321 muv = std::min(254.0F, std::max(1.0F, muv)); siv = std::max(1.0F, std::min(siv, 512.0F));
322 }
323
324 //! Get minimum triplet for use by cv::inRange()
325 cv::Scalar rmin() const
326 { return cv::Scalar(std::max(0.0F, muh - sih), std::max(0.0F, mus - sis), std::max(0.0F, muv - siv)); }
327
328 //! Get maximum triplet for use by cv::inRange()
329 cv::Scalar rmax() const
330 { return cv::Scalar(std::min(179.0F, muh + sih), 255, 255); }
331
332 float muh, sih; //!< Mean and sigma for H
333 float mus, sis; //!< Mean and sigma for S
334 float muv, siv; //!< Mean and sigma for V
335 };
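
  // Worked example (from the constructors and rmin()/rmax() above): the module defaults hcue=45,
  // scue=50, and vcue=200 build hsvcue(45, 50, 200), i.e., muh=45/sih=30, mus=50/sis=20, muv=200/siv=20,
  // which thresholds H in [15..75], S in [30..255], and V in [180..255] for thread 0.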
336
337 std::vector<hsvcue> itsHSV;
338
339 // ####################################################################################################
340 //! Helper struct for a detected object
341 struct detection
342 {
343 std::vector<cv::Point> contour; //!< The full detailed contour
344 std::vector<cv::Point> approx; //!< Smoothed approximation of the contour
345 std::vector<cv::Point> hull; //!< Convex hull of the contour
346 size_t threadnum; //!< Thread number that detected this object
347 float serr; //!< Shape error score (higher for rougher contours with defects)
348 };
349
350 //! Our detections, combined across all threads
351 std::vector<detection> itsDetections;
352 std::mutex itsDetMtx;
353
354 //! Kalman filters to learn and adapt HSV windows over time
355 std::shared_ptr<Kalman1D> itsKalH, itsKalS, itsKalV;
356
357 //! Erosion and dilation kernels shared across all detect threads
358 cv::Mat itsErodeElement, itsDilateElement;
359
360 // ####################################################################################################
361 //! ParallelLoopBody class for the parallelization of single-marker pose estimation
362 /*! Derived from opencv_contrib ArUco module, it's just a simple solvePnP inside. */
363 class SinglePoseEstimationParallel : public cv::ParallelLoopBody
364 {
365 public:
366 SinglePoseEstimationParallel(cv::Mat & _objPoints, cv::InputArrayOfArrays _corners,
367 cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
368 cv::Mat & _rvecs, cv::Mat & _tvecs) :
369 objPoints(_objPoints), corners(_corners), cameraMatrix(_cameraMatrix),
370 distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs)
371 { }
372
373 void operator()(cv::Range const & range) const
374 {
375 int const begin = range.start;
376 int const end = range.end;
377
378 for (int i = begin; i < end; ++i)
379 cv::solvePnP(objPoints, corners.getMat(i), cameraMatrix, distCoeffs,
380 rvecs.at<cv::Vec3d>(i), tvecs.at<cv::Vec3d>(i));
381 }
382
383 private:
384 cv::Mat & objPoints;
385 cv::InputArrayOfArrays corners;
386 cv::InputArray cameraMatrix, distCoeffs;
387 cv::Mat & rvecs; cv::Mat & tvecs; // note: & binds per declarator, so both must be declared references
388 };
389
390 // ####################################################################################################
391 // ####################################################################################################
392 // ####################################################################################################
393
394 public:
395 // ####################################################################################################
396 //! Constructor
397 FirstVision(std::string const & instance) : jevois::StdModule(instance)
398 {
399 itsKalH = addSubComponent<Kalman1D>("kalH");
400 itsKalS = addSubComponent<Kalman1D>("kalS");
401 itsKalV = addSubComponent<Kalman1D>("kalV");
402 }
403
404 //! Load camera calibration on init
405 void postInit() override
406 {
407 itsCalib = engine()->loadCameraCalibration();
408 }
409
410 // ####################################################################################################
411 //! Virtual destructor for safe inheritance
412 virtual ~FirstVision() { }
413
414 // ####################################################################################################
415 //! Estimate 6D pose of detected objects, if dopose parameter is true, otherwise just 2D corners
416 /*! Inspired by the ArUco module of opencv_contrib.
417 The corners array is always filled, but rvecs and tvecs are only filled if dopose is true */
418 void estimatePose(std::vector<std::vector<cv::Point2f> > & corners, cv::OutputArray _rvecs,
419 cv::OutputArray _tvecs)
420 {
421 auto const osiz = objsize::get();
422
423 // Get a vector of all our corners so we can map them to 3D and draw them:
424 corners.clear();
425 for (detection const & d : itsDetections)
426 {
427 corners.push_back(std::vector<cv::Point2f>());
428 std::vector<cv::Point2f> & v = corners.back();
429 for (auto const & p : d.hull) v.push_back(cv::Point2f(p));
430 }
431
432 if (dopose::get())
433 {
434 // set coordinate system in the middle of the object, with Z pointing out
435 cv::Mat objPoints(4, 1, CV_32FC3);
436 objPoints.ptr< cv::Vec3f >(0)[0] = cv::Vec3f(-osiz.width * 0.5F, -osiz.height * 0.5F, 0);
437 objPoints.ptr< cv::Vec3f >(0)[1] = cv::Vec3f(-osiz.width * 0.5F, osiz.height * 0.5F, 0);
438 objPoints.ptr< cv::Vec3f >(0)[2] = cv::Vec3f(osiz.width * 0.5F, osiz.height * 0.5F, 0);
439 objPoints.ptr< cv::Vec3f >(0)[3] = cv::Vec3f(osiz.width * 0.5F, -osiz.height * 0.5F, 0);
440
441 int nobj = (int)corners.size();
442 _rvecs.create(nobj, 1, CV_64FC3); _tvecs.create(nobj, 1, CV_64FC3);
443 cv::Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
444 cv::parallel_for_(cv::Range(0, nobj), SinglePoseEstimationParallel(objPoints, corners, itsCalib.camMatrix,
445 itsCalib.distCoeffs, rvecs, tvecs));
446 }
447 }
448
449 // ####################################################################################################
450 //! HSV object detector, we run several of those in parallel with different hsvcue settings
451 void detect(cv::Mat const & imghsv, size_t tnum, int dispx = 3, int dispy = 242, jevois::RawImage *outimg = nullptr)
452 {
453 // Threshold the HSV image to only keep pixels within the desired HSV range:
454 cv::Mat imgth;
455 hsvcue const & hsv = itsHSV[tnum]; cv::Scalar const rmin = hsv.rmin(), rmax = hsv.rmax();
456 cv::inRange(imghsv, rmin, rmax, imgth);
457 std::string str = jevois::sformat("T%zu: H=%03d-%03d S=%03d-%03d V=%03d-%03d ", tnum, int(rmin.val[0]),
458 int(rmax.val[0]), int(rmin.val[1]), int(rmax.val[1]),
459 int(rmin.val[2]), int(rmax.val[2]));
460
461 // Apply morphological operations to clean up the image noise:
462 if (itsErodeElement.empty() == false) cv::erode(imgth, imgth, itsErodeElement);
463 if (itsDilateElement.empty() == false) cv::dilate(imgth, imgth, itsDilateElement);
464
465 // Detect objects by finding contours:
466 std::vector<std::vector<cv::Point> > contours; std::vector<cv::Vec4i> hierarchy;
467 cv::findContours(imgth, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
468 str += jevois::sformat("N=%03zu ", hierarchy.size());
469
470 double const epsi = epsilon::get();
471 int const m = margin::get();
472
473 // Identify the "good" objects:
474 std::string str2, beststr2;
475 if (hierarchy.size() > 0 && hierarchy.size() <= maxnumobj::get())
476 {
477 for (int index = 0; index >= 0; index = hierarchy[index][0])
478 {
479 // Keep track of our best detection so far:
480 if (str2.length() > beststr2.length()) beststr2 = str2;
481 str2.clear();
482
483 // Let's examine this contour:
484 std::vector<cv::Point> const & c = contours[index];
485 detection d;
486
487 // Compute contour area:
488 double const area = cv::contourArea(c, false);
489
490 // Compute convex hull:
491 std::vector<cv::Point> rawhull;
492 cv::convexHull(c, rawhull, true);
493 double const rawhullperi = cv::arcLength(rawhull, true);
494 cv::approxPolyDP(rawhull, d.hull, epsi * rawhullperi * 3.0, true);
495
496 // Is it the right shape?
497 if (d.hull.size() != 4) continue; // 4 vertices for the rectangular convex outline (shows as a trapezoid)
498 str2 += "H"; // Hull is quadrilateral
499
500 double const huarea = cv::contourArea(d.hull, false);
501 if ( ! hullarea::get().contains(int(huarea + 0.4999))) continue;
502 str2 += "A"; // Hull area ok
503
504 int const hufill = int(area / huarea * 100.0 + 0.4999);
505 if (hufill > hullfill::get()) continue;
506 str2 += "F"; // Fill is ok
507
508 // Check object shape:
509 double const peri = cv::arcLength(c, true);
510 cv::approxPolyDP(c, d.approx, epsi * peri, true);
511 if (d.approx.size() < 7 || d.approx.size() > 9) continue; // 8 vertices for a U shape
512 str2 += "S"; // Shape is ok
513
514 // Compute contour serr:
515 d.serr = 100.0 * cv::matchShapes(c, d.approx, cv::CONTOURS_MATCH_I1, 0.0);
516 if (d.serr > ethresh::get()) continue;
517 str2 += "E"; // Shape error is ok
518
519 // Reject the shape if any of its vertices gets within the margin of the image bounds. This is to avoid
520 // getting grossly incorrect 6D pose estimates as the shape starts getting truncated as it partially exits the
521 // camera field of view:
522 bool reject = false;
523 for (size_t i = 0; i < c.size(); ++i)
524 if (c[i].x < m || c[i].x >= imghsv.cols - m || c[i].y < m || c[i].y >= imghsv.rows - m)
525 { reject = true; break; }
526 if (reject) continue;
527 str2 += "M"; // Margin ok
528
529 // Re-order the 4 points in the hull if needed: In the pose estimation code, we will assume vertices ordered
530 // as follows:
531 //
532 // 0| |3
533 // | |
534 // | |
535 // 1----------2
536
537 // v10+v23 should point outward from the U more than v03+v12 does:
538 std::complex<float> v10p23(float(d.hull[0].x - d.hull[1].x + d.hull[3].x - d.hull[2].x),
539 float(d.hull[0].y - d.hull[1].y + d.hull[3].y - d.hull[2].y));
540 float const len10p23 = std::abs(v10p23);
541 std::complex<float> v03p12(float(d.hull[3].x - d.hull[0].x + d.hull[2].x - d.hull[1].x),
542 float(d.hull[3].y - d.hull[0].y + d.hull[2].y - d.hull[1].y));
543 float const len03p12 = std::abs(v03p12);
544
545 // Vector from centroid of U shape to centroid of its hull should also point outward from the U:
546 cv::Moments const momC = cv::moments(c);
547 cv::Moments const momH = cv::moments(d.hull);
548 std::complex<float> vCH(momH.m10 / momH.m00 - momC.m10 / momC.m00, momH.m01 / momH.m00 - momC.m01 / momC.m00);
549 float const lenCH = std::abs(vCH);
550
551 if (len10p23 < 0.1F || len03p12 < 0.1F || lenCH < 0.1F) continue;
552 str2 += "V"; // Shape vectors ok
553
554 float const good = (v10p23.real() * vCH.real() + v10p23.imag() * vCH.imag()) / (len10p23 * lenCH);
555 float const bad = (v03p12.real() * vCH.real() + v03p12.imag() * vCH.imag()) / (len03p12 * lenCH);
556
557 // We reject upside-down detections as those are likely to be spurious:
558 if (vCH.imag() >= -2.0F) continue;
559 str2 += "U"; // U shape is upright
560
561 // Fixup the ordering of the vertices if needed:
562 if (bad > good) { d.hull.insert(d.hull.begin(), d.hull.back()); d.hull.pop_back(); }
563
564 // This detection is a keeper:
565 str2 += " OK";
566 d.contour = c;
567 std::lock_guard<std::mutex> _(itsDetMtx);
568 itsDetections.push_back(d);
569 }
570 if (str2.length() > beststr2.length()) beststr2 = str2;
571 }
572
573 // Display any results requested by the users:
574 if (outimg && outimg->valid())
575 {
576 if (tnum == showthread::get() && int(outimg->width) == 2 * imgth.cols)
577 jevois::rawimage::pasteGreyToYUYV(imgth, *outimg, imgth.cols, 0);
578 jevois::rawimage::writeText(*outimg, str + beststr2, dispx, dispy + 12*tnum, jevois::yuyv::White);
579 }
580 }
581
582 // ####################################################################################################
583 //! Initialize (e.g., if user changes cue params) or update our HSV detection ranges
584 void updateHSV(size_t nthreads)
585 {
586 float const spread = 0.2F;
587
588 if (itsHSV.empty() || itsCueChanged)
589 {
590 // Initialize or reset because of user parameter change:
591 itsHSV.clear(); itsCueChanged = false;
592 for (size_t i = 0; i < nthreads; ++i)
593 {
594 hsvcue cue(hcue::get(), scue::get(), vcue::get());
595 cue.sih *= (1.0F + spread * i); cue.sis *= (1.0F + spread * i); cue.siv *= (1.0F + spread * i);
596 cue.fix();
597 itsHSV.push_back(cue);
598 }
599 if (nthreads > 2)
600 {
601 itsKalH->set(hcue::get()); itsKalH->get();
602 itsKalS->set(scue::get()); itsKalS->get();
603 itsKalV->set(vcue::get()); itsKalV->get();
604 }
605 }
606 else
607 {
608 // Kalman update:
609 if (nthreads > 2)
610 {
611 itsHSV[2].muh = itsKalH->get();
612 itsHSV[2].mus = itsKalS->get();
613 itsHSV[2].muv = itsKalV->get();
614 itsHSV[2].fix();
615 for (size_t i = 3; i < itsHSV.size(); ++i)
616 {
617 itsHSV[i] = itsHSV[2];
618 itsHSV[i].sih *= (1.0F + spread * i);
619 itsHSV[i].sis *= (1.0F + spread * i);
620 itsHSV[i].siv *= (1.0F + spread * i);
621 itsHSV[i].fix();
622 }
623 }
624 }
625 }
626
627 // ####################################################################################################
628 //! Clean up the detections by eliminating duplicates:
629 void cleanupDetections()
630 {
631 bool keepgoing = true;
632 double const iouth = iou::get();
633
634 while (keepgoing)
635 {
636 // We will stop if we do not eliminate any more objects:
637 keepgoing = false; int delidx = -1;
638
639 // Loop over all pairs of objects:
640 size_t const siz = itsDetections.size();
641 for (size_t i = 0; i < siz; ++i)
642 {
643 for (size_t j = 0; j < i; ++j)
644 {
645 std::vector<cv::Point> pts = itsDetections[i].hull;
646 for (cv::Point const & p : itsDetections[j].hull) pts.push_back(p);
647 std::vector<cv::Point> hull;
648 cv::convexHull(pts, hull); // FIXME should do a true union! this is just an approximation to it
649 double uarea = cv::contourArea(hull);
650 double iarea = cv::contourArea(itsDetections[i].hull) + cv::contourArea(itsDetections[j].hull) - uarea;
651
652 // note: object detection code guarantees non-zero area:
653 double const inoun = iarea / uarea;
654 if (inoun >= iouth)
655 {
656 if (itsDetections[i].serr > itsDetections[j].serr) delidx = j; else delidx = i;
657 break;
658 }
659 }
660 if (delidx != -1) break;
661 }
662 if (delidx != -1) { itsDetections.erase(itsDetections.begin() + delidx); keepgoing = true; }
663 }
664 }
665
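  // ####################################################################################################
  //! Illustrative sketch (not used above): exact IoU for two convex hulls
  /*! The union computed in cleanupDetections() as the convex hull of both point sets over-estimates the
      true union area (see the FIXME note there), so the IoU used for duplicate elimination is only
      approximate. For convex polygons, an exact variant could use cv::intersectConvexConvex(): */
  static double iouExact(std::vector<cv::Point> const & a, std::vector<cv::Point> const & b)
  {
    std::vector<cv::Point2f> af(a.begin(), a.end()), bf(b.begin(), b.end()), inter;
    double const iarea = cv::intersectConvexConvex(af, bf, inter, true); // intersection area
    double const uarea = cv::contourArea(af) + cv::contourArea(bf) - iarea; // union by inclusion-exclusion
    return (uarea > 0.0) ? iarea / uarea : 0.0;
  }
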
666 // ####################################################################################################
667 //! Learn and update our HSV ranges
668 void learnHSV(size_t nthreads, cv::Mat const & imgbgr, jevois::RawImage *outimg = nullptr)
669 {
670 int const w = imgbgr.cols, h = imgbgr.rows;
671
672 // Compute the median filtered BGR image in a thread:
673 cv::Mat medimgbgr;
674 auto median_fut = jevois::async([&](){ cv::medianBlur(imgbgr, medimgbgr, 3); } );
675
676 // Get all the cleaned-up contours:
677 std::vector<std::vector<cv::Point> > contours;
678 for (detection const & d : itsDetections) contours.push_back(d.contour);
679
680 // If desired, draw all contours:
681 std::future<void> drawc_fut;
682 if (debug::get() && outimg && outimg->valid())
683 drawc_fut = jevois::async([&]() {
684 // We reinterpret the top portion of our YUYV output image as an opencv 8UC2 image:
685 cv::Mat outuc2(outimg->height, outimg->width, CV_8UC2, outimg->pixelsw<unsigned char>());
686 cv::drawContours(outuc2, contours, -1, jevois::yuyv::LightPink, 2);
687 } );
688
689 // Draw all the filled contours into a binary mask image:
690 cv::Mat mask(h, w, CV_8UC1, (unsigned char)0);
691 cv::drawContours(mask, contours, -1, 255, -1); // last -1 is for filled
692
693 // Wait until median filter is done:
694 median_fut.get();
695
696 // Compute mean and std BGR values inside objects:
697 cv::Mat mean, std;
698 cv::meanStdDev(medimgbgr, mean, std, mask);
699
700 // Convert to HSV:
701 cv::Mat bgrmean(2, 1, CV_8UC3); bgrmean.at<cv::Vec3b>(0, 0) = mean; bgrmean.at<cv::Vec3b>(1, 0) = std;
702 cv::Mat hsvmean; cv::cvtColor(bgrmean, hsvmean, cv::COLOR_BGR2HSV);
703
704 cv::Vec3b hsv = hsvmean.at<cv::Vec3b>(0, 0);
705 int H = hsv.val[0], S = hsv.val[1], V = hsv.val[2];
706
707 cv::Vec3b sighsv = hsvmean.at<cv::Vec3b>(1, 0);
708 int sH = sighsv.val[0], sS = sighsv.val[1], sV = sighsv.val[2];
709
710 // Set the new measurements:
711 itsKalH->set(H); itsKalS->set(S); itsKalV->set(V);
712
713 if (nthreads > 2)
714 {
715 float const eta = 0.4F;
716 itsHSV[2].sih = (1.0F - eta) * itsHSV[2].sih + eta * sH;
717 itsHSV[2].sis = (1.0F - eta) * itsHSV[2].sis + eta * sS;
718 itsHSV[2].siv = (1.0F - eta) * itsHSV[2].siv + eta * sV;
719 itsHSV[2].fix();
720 }
721
722 // note: drawc_fut may block us here until it is complete.
723 }
724
725 // ####################################################################################################
726 //! Send serial messages about each detection:
727 void sendAllSerial(int w, int h, std::vector<std::vector<cv::Point2f> > const & corners,
728 std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
729 {
730 if (rvecs.empty() == false)
731 {
732 // If we have rvecs and tvecs, we are doing 3D pose estimation, so send a 3D message:
733 auto const osiz = objsize::get();
734 for (size_t i = 0; i < corners.size(); ++i)
735 {
736 cv::Vec3d const & rv = rvecs[i];
737 cv::Vec3d const & tv = tvecs[i];
738
739 // Compute quaternion:
740 float const theta = std::sqrt(rv[0] * rv[0] + rv[1] * rv[1] + rv[2] * rv[2]);
741 Eigen::Vector3f const axis = (theta > 0.0F) ? Eigen::Vector3f(rv[0], rv[1], rv[2]) / theta : Eigen::Vector3f::UnitZ();
742 Eigen::Quaternion<float> q(Eigen::AngleAxis<float>(theta, axis)); // AngleAxis requires a unit-length axis
743
744 sendSerialStd3D(tv[0], tv[1], tv[2], // position
745 osiz.width, osiz.height, 1.0F, // size
746 q.w(), q.x(), q.y(), q.z(), // pose
747 "FIRST"); // FIRST robotics shape
748 }
749 }
750 else
751 {
752 // Send one 2D message per object:
753 for (size_t i = 0; i < corners.size(); ++i)
754 sendSerialContour2D(w, h, corners[i], "FIRST");
755 }
756 }
757
758 // ####################################################################################################
759 //! Update the morphology structuring elements if needed
760 void updateStructuringElements()
761 {
762 int e = erodesize::get();
763 if (e != itsErodeElement.cols)
764 {
765 if (e) itsErodeElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(e, e));
766 else itsErodeElement.release();
767 }
768
769 int d = dilatesize::get();
770 if (d != itsDilateElement.cols)
771 {
772 if (d) itsDilateElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(d, d));
773 else itsDilateElement.release();
774 }
775 }
776
777 // ####################################################################################################
778 //! Processing function, no USB video output
779 virtual void process(jevois::InputFrame && inframe) override
780 {
781 static jevois::Timer timer("processing");
782
783 // Wait for next available camera image. Any resolution ok:
784 jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
785
786 timer.start();
787
788 // Convert input image to BGR24, then to HSV:
789 cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
790 cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
791 size_t const nthreads = threads::get();
792
793 // Make sure our HSV range parameters are up to date:
794 updateHSV(nthreads);
795
796 // Clear any old detections and get ready to parallelize the detection work:
797 itsDetections.clear();
798 updateStructuringElements();
799
800 // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
801 std::vector<std::future<void> > dfut;
802 for (size_t i = 0; i < nthreads - 1; ++i)
803 dfut.push_back(jevois::async([&](size_t tn) { detect(imghsv, tn, 3, h+2); }, i));
804 detect(imghsv, nthreads - 1, 3, h+2);
805
806 // Wait for all threads to complete:
807 for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
808
809 // Let camera know we are done processing the input image:
810 inframe.done();
811
812 // Clean up the detections by eliminating duplicates:
813 cleanupDetections();
814
815 // Learn the object's HSV value over time:
815 // Learn the object's HSV value over time:
816 auto learn_fut = jevois::async([&]() { learnHSV(nthreads, imgbgr); });
817
818 // Map to 6D (inverse perspective):
819 std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
820 estimatePose(corners, rvecs, tvecs);
821
822 // Send all serial messages:
823 sendAllSerial(w, h, corners, rvecs, tvecs);
824
825 // Wait for all threads:
826 try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
827
828 // Show processing fps:
829 timer.stop();
830 }
831
832 // ####################################################################################################
833 //! Processing function, with USB video output
834 virtual void process(jevois::InputFrame && inframe, jevois::OutputFrame && outframe) override
835 {
836 static jevois::Timer timer("processing");
837
838 // Wait for next available camera image. Any resolution ok, but require YUYV since we assume it for drawings:
839 jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
840 inimg.require("input", w, h, V4L2_PIX_FMT_YUYV);
841
842 timer.start();
843
844 // While we process it, start a thread to wait for output frame and paste the input image into it:
845 jevois::RawImage outimg; // main thread should not use outimg until paste thread is complete
846 auto paste_fut = jevois::async([&]() {
847 outimg = outframe.get();
848 outimg.require("output", outimg.width, h + 50, inimg.fmt);
849 if (outimg.width != w && outimg.width != w * 2) LFATAL("Output image width should be 1x or 2x input width");
850 jevois::rawimage::paste(inimg, outimg, 0, 0);
851 jevois::rawimage::writeText(outimg, "JeVois FIRST Vision", 3, 3, jevois::yuyv::White);
852 jevois::rawimage::drawFilledRect(outimg, 0, h, outimg.width, outimg.height - h, jevois::yuyv::Black);
853 });
854
855 // Convert input image to BGR24, then to HSV:
856 cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
857 cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
858 size_t const nthreads = threads::get();
859
860 // Make sure our HSV range parameters are up to date:
861 updateHSV(nthreads);
862
863 // Clear any old detections and get ready to parallelize the detection work:
864 itsDetections.clear();
865 updateStructuringElements();
866
867 // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
868 std::vector<std::future<void> > dfut;
869 for (size_t i = 0; i < nthreads - 1; ++i)
870 dfut.push_back(jevois::async([&](size_t tn) { detect(imghsv, tn, 3, h+2, &outimg); }, i));
871 detect(imghsv, nthreads - 1, 3, h+2, &outimg);
872
873 // Wait for all threads to complete:
874 for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
875
876 // Wait for paste to finish up:
877 paste_fut.get();
878
879 // Let camera know we are done processing the input image:
880 inframe.done();
881
882 // Clean up the detections by eliminating duplicates:
883 cleanupDetections();
884
885 // Learn the object's HSV value over time:
886 auto learn_fut = jevois::async([&]() { learnHSV(nthreads, imgbgr, &outimg); });
887
888 // Map to 6D (inverse perspective):
889 std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
890 estimatePose(corners, rvecs, tvecs);
891
892 // Send all serial messages:
893 sendAllSerial(w, h, corners, rvecs, tvecs);
894
895 // Draw all detections in 3D:
896 drawDetections(outimg, corners, rvecs, tvecs);
897
898 // Show number of detected objects:
899 jevois::rawimage::writeText(outimg, "Detected " + std::to_string(itsDetections.size()) + " objects.",
900 w + 3, 3, jevois::yuyv::White);
901
902 // Wait for all threads:
903 try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
904
905 // Show processing fps:
906 std::string const & fpscpu = timer.stop();
907 jevois::rawimage::writeText(outimg, fpscpu, 3, h - 13, jevois::yuyv::White);
908
909 // Send the output image with our processing results to the host over USB:
910 outframe.send();
911 }
912
913 // ####################################################################################################
914 void drawDetections(jevois::RawImage & outimg, std::vector<std::vector<cv::Point2f> > corners,
915 std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
916 {
917 auto const osiz = objsize::get(); float const w = osiz.width, h = osiz.height;
918 int nobj = int(corners.size());
919
920 // This code is like drawDetectedMarkers() in cv::aruco, but for YUYV output image:
921 if (rvecs.empty())
922 {
923 // We are not doing 3D pose estimation. Just draw object outlines in 2D:
924 for (int i = 0; i < nobj; ++i)
925 {
926 std::vector<cv::Point2f> const & obj = corners[i];
927
928 // draw marker sides:
929 for (int j = 0; j < 4; ++j)
930 {
931 cv::Point2f const & p0 = obj[j];
932 cv::Point2f const & p1 = obj[ (j+1) % 4 ];
933 jevois::rawimage::drawLine(outimg, int(p0.x + 0.5F), int(p0.y + 0.5F),
934 int(p1.x + 0.5F), int(p1.y + 0.5F), 1, jevois::yuyv::LightPink);
935 //jevois::rawimage::writeText(outimg, std::to_string(j),
936 // int(p0.x + 0.5F), int(p0.y + 0.5F), jevois::yuyv::White);
937 }
938 }
939 }
940 else
941 {
942 // Show trihedron and parallelepiped centered on object:
943 float const hw = w * 0.5F, hh = h * 0.5F, dd = -0.5F * std::max(w, h);
944
945 for (int i = 0; i < nobj; ++i)
946 {
947 // Project axis points:
948 std::vector<cv::Point3f> axisPoints;
949 axisPoints.push_back(cv::Point3f(0.0F, 0.0F, 0.0F));
950 axisPoints.push_back(cv::Point3f(hw, 0.0F, 0.0F));
951 axisPoints.push_back(cv::Point3f(0.0F, hh, 0.0F));
952 axisPoints.push_back(cv::Point3f(0.0F, 0.0F, dd));
953
954 std::vector<cv::Point2f> imagePoints;
955 cv::projectPoints(axisPoints, rvecs[i], tvecs[i], itsCalib.camMatrix, itsCalib.distCoeffs, imagePoints);
956
957 // Draw axis lines:
958 jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
959 int(imagePoints[1].x + 0.5F), int(imagePoints[1].y + 0.5F),
960 2, jevois::yuyv::MedGreen);
961 jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
962 int(imagePoints[2].x + 0.5F), int(imagePoints[2].y + 0.5F),
963 2, jevois::yuyv::MedGrey);
964 jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
965 int(imagePoints[3].x + 0.5F), int(imagePoints[3].y + 0.5F),
966 2, jevois::yuyv::MedPurple);
967
968 // Also draw a parallelepiped:
969 std::vector<cv::Point3f> cubePoints;
970 cubePoints.push_back(cv::Point3f(-hw, -hh, 0.0F));
971 cubePoints.push_back(cv::Point3f(hw, -hh, 0.0F));
972 cubePoints.push_back(cv::Point3f(hw, hh, 0.0F));
973 cubePoints.push_back(cv::Point3f(-hw, hh, 0.0F));
974 cubePoints.push_back(cv::Point3f(-hw, -hh, dd));
975 cubePoints.push_back(cv::Point3f(hw, -hh, dd));
976 cubePoints.push_back(cv::Point3f(hw, hh, dd));
977 cubePoints.push_back(cv::Point3f(-hw, hh, dd));
978
979 std::vector<cv::Point2f> cuf;
980 cv::projectPoints(cubePoints, rvecs[i], tvecs[i], itsCalib.camMatrix, itsCalib.distCoeffs, cuf);
981
982 // Round all the coordinates:
983 std::vector<cv::Point> cu;
984 for (auto const & p : cuf) cu.push_back(cv::Point(int(p.x + 0.5F), int(p.y + 0.5F)));
985
986 // Draw parallelepiped lines:
987 jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[1].x, cu[1].y, 1, jevois::yuyv::LightGreen);
988 jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[2].x, cu[2].y, 1, jevois::yuyv::LightGreen);
989 jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[3].x, cu[3].y, 1, jevois::yuyv::LightGreen);
990 jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[0].x, cu[0].y, 1, jevois::yuyv::LightGreen);
991 jevois::rawimage::drawLine(outimg, cu[4].x, cu[4].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
992 jevois::rawimage::drawLine(outimg, cu[5].x, cu[5].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
993 jevois::rawimage::drawLine(outimg, cu[6].x, cu[6].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
994 jevois::rawimage::drawLine(outimg, cu[7].x, cu[7].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
995 jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
996 jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
997 jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
998 jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
999 }
1000 }
1001 }
1002};
1003
1004// Allow the module to be loaded as a shared object (.so) file:
JEVOIS_REGISTER_MODULE(FirstVision)