JeVoisBase  1.6
JeVois Smart Embedded Machine Vision Toolkit Base Modules
FirstVision.C
1 // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 //
3 // JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
4 // California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
5 //
6 // This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
7 // redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
8 // Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9 // without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10 // License for more details. You should have received a copy of the GNU General Public License along with this program;
11 // if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12 //
13 // Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
14 // Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
15 // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
16 /*! \file */
17 
18 #include <jevois/Core/Module.H>
19 #include <jevois/Debug/Log.H>
20 #include <jevois/Util/Utils.H>
21 #include <jevois/Image/RawImageOps.H>
22 #include <jevois/Debug/Timer.H>
23 #include <jevoisbase/Components/Tracking/Kalman1D.H>
24 
26 
27 #include <opencv2/core/core.hpp>
28 #include <opencv2/imgproc/imgproc.hpp>
29 #include <opencv2/calib3d/calib3d.hpp>
30 
31 #include <Eigen/Geometry> // for AngleAxis and Quaternion
32 
33 // REMINDER: make sure you understand the viral nature and terms of the above license. If you are writing code derived
34 // from this file, you must offer your source under the GPL license too.
35 
36 static jevois::ParameterCategory const ParamCateg("FirstVision Options");
37 
38 //! Parameter \relates FirstVision
39 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(hcue, unsigned char, "Initial cue for target hue (0=red/do not use because of "
40  "wraparound, 30=yellow, 45=light green, 60=green, 75=green cyan, 90=cyan, "
41  "105=light blue, 120=blue, 135=purple, 150=pink)",
42  45, jevois::Range<unsigned char>(0, 179), ParamCateg);
43 
44 //! Parameter \relates FirstVision
45 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(scue, unsigned char, "Initial cue for target saturation lower bound",
46  50, ParamCateg);
47 
48 //! Parameter \relates FirstVision
49 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(vcue, unsigned char, "Initial cue for target value (brightness) lower bound",
50  200, ParamCateg);
51 
52 //! Parameter \relates FirstVision
53 JEVOIS_DECLARE_PARAMETER(maxnumobj, size_t, "Max number of blobs for a frame to be considered clean. If more blobs "
54  "are detected in a frame, we skip that frame before we even try to analyze the shapes of the blobs",
55  100, ParamCateg);
56 
57 //! Parameter \relates FirstVision
58 JEVOIS_DECLARE_PARAMETER(hullarea, jevois::Range<unsigned int>, "Range of object area (in pixels) to track. Use this "
59  "if you want to skip shape analysis of very large or very small blobs",
60  jevois::Range<unsigned int>(20*20, 300*300), ParamCateg);
61 
62 //! Parameter \relates FirstVision
63 JEVOIS_DECLARE_PARAMETER(hullfill, int, "Max fill ratio of the convex hull (percent). Lower values mean your shape "
64  "occupies a smaller fraction of its convex hull. This parameter sets an upper bound, "
65  "fuller shapes will be rejected.",
66  50, jevois::Range<int>(1, 100), ParamCateg);
67 
68 //! Parameter \relates FirstVision
69 JEVOIS_DECLARE_PARAMETER(erodesize, size_t, "Erosion structuring element size (pixels), or 0 for no erosion",
70  2, ParamCateg);
71 
72 //! Parameter \relates FirstVision
73 JEVOIS_DECLARE_PARAMETER(dilatesize, size_t, "Dilation structuring element size (pixels), or 0 for no dilation",
74  4, ParamCateg);
75 
76 //! Parameter \relates FirstVision
77 JEVOIS_DECLARE_PARAMETER(epsilon, double, "Shape smoothing factor (higher for smoother). Shape smoothing is applied "
78  "to remove small contour defects before the shape is analyzed.",
79  0.015, jevois::Range<double>(0.001, 0.999), ParamCateg);
80 
81 //! Parameter \relates FirstVision
82 JEVOIS_DECLARE_PARAMETER(debug, bool, "Show contours of all object candidates if true",
83  false, ParamCateg);
84 
85 //! Parameter \relates FirstVision
86 JEVOIS_DECLARE_PARAMETER(threads, size_t, "Number of parallel vision processing threads. Thread 0 uses the HSV values "
87  "provided by user parameters; thread 1 broadens that fixed range a bit; threads 2-3 use a "
88  "narrow and broader learned HSV window over time",
89  4, jevois::Range<size_t>(2, 4), ParamCateg);
90 
91 //! Parameter \relates FirstVision
92 JEVOIS_DECLARE_PARAMETER(showthread, size_t, "Thread number that is used to display HSV-thresholded image",
93  0, jevois::Range<size_t>(0, 3), ParamCateg);
94 
95 //! Parameter \relates FirstVision
96 JEVOIS_DECLARE_PARAMETER(ethresh, double, "Shape error threshold (lower is stricter for exact shape)",
97  900.0, jevois::Range<double>(0.01, 1000.0), ParamCateg);
98 
99 //! Parameter \relates FirstVision
100 JEVOIS_DECLARE_PARAMETER(dopose, bool, "Compute (and show) 6D object pose, requires a valid camera calibration. "
101  "When dopose is true, 3D serial messages are sent out, otherwise 2D serial messages.",
102  true, ParamCateg);
103 
104 //! Parameter \relates FirstVision
105 JEVOIS_DECLARE_PARAMETER(camparams, std::string, "File stem of camera parameters, or empty. Camera resolution "
106  "will be appended, as well as a .yaml extension. For example, specifying 'calibration' "
107  "here and running the camera sensor at 320x240 will attempt to load "
108  "calibration320x240.yaml from within directory " JEVOIS_SHARE_PATH "/camera/",
109  "calibration", ParamCateg);
110 
111 //! Parameter \relates FirstVision
112 JEVOIS_DECLARE_PARAMETER(iou, double, "Intersection-over-union ratio over which duplicates are eliminated",
113  0.3, jevois::Range<double>(0.01, 0.99), ParamCateg);
114 
115 //! Parameter \relates FirstVision
116 JEVOIS_DECLARE_PARAMETER(objsize, cv::Size_<float>, "Object size (in meters)",
117  cv::Size_<float>(0.28F, 0.175F), ParamCateg);
118 
119 //! Parameter \relates FirstVision
120 JEVOIS_DECLARE_PARAMETER(margin, size_t, "Margin from frame borders (pixels). If any corner of a detected shape "
121  "gets closer than the margin to the frame borders, the shape will be rejected. This is to "
122  "avoid possibly bogus 6D pose estimation when the shape starts getting truncated as it "
123  "partially exits the camera's field of view.",
124  5, ParamCateg);
125 
126 //! Simple color-based detection of a U-shaped object for FIRST Robotics
127 /*! This module isolates pixels within a given HSV range (hue, saturation, and value of color pixels), does some
128  cleanup, and extracts object contours. It looks for a rectangular U shape of a specific size (set by parameter
129  \p objsize). See the screenshots for an example of the shape. It sends information about detected objects over serial.
130 
131  This module usually works best with the camera sensor set to manual exposure, manual gain, manual color balance,
132  etc., so that HSV color values are reliable. See the \b script.cfg file in this module's directory for an example of
133  how to set the camera settings each time this module is loaded.
134 
135  This code was loosely inspired by the JeVois \jvmod{ObjectTracker} module. Also see \jvmod{FirstPython} for a
136  simplified version of this module, written in Python.
137 
138  This module is provided for inspiration. It has no pretension of actually solving the FIRST Robotics vision problem
139  in a complete and reliable way. It is released in the hope that FRC teams will try it out and get inspired to
140  develop something much better for their own robot.
141 
142  General pipeline
143  ----------------
144 
145  The basic idea of this module is the classic FIRST robotics vision pipeline: first, select a range of pixels in HSV
146  color pixel space likely to include the object. Then, detect contours of all blobs in range. Then apply some tests
147  on the shape of the detected blobs, their size, fill ratio (ratio of object area compared to its convex hull's
148  area), etc. Finally, estimate the location and pose of the object in the world.
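 As a minimal sketch of this classic pipeline (illustrative only; the actual implementation below adds
 morphological cleanup, several parallel threads, and many quality-control tests, and the HSV bounds shown
 here are made-up values), the core steps look like:

 \code
 std::vector<std::vector<cv::Point> > findBlobs(cv::Mat const & imgbgr)
 {
   cv::Mat imghsv, mask;
   cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);  // convert BGR input to HSV
   cv::inRange(imghsv, cv::Scalar(30, 50, 180), cv::Scalar(60, 255, 255), mask); // keep in-range pixels
   std::vector<std::vector<cv::Point> > contours;
   cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE); // outline each blob
   // ... then filter contours by area, fill ratio, etc., and estimate pose with cv::solvePnP()
   return contours;
 }
 \endcode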
149 
150  In this module, we run up to 4 pipelines in parallel, using different settings for the range of HSV pixels
151  considered:
152 
153  - Pipeline 0 uses the HSV values provided by user parameters;
154  - Pipeline 1 broadens that fixed range a bit;
155  - Pipelines 2-3 use narrower and broader versions of an HSV window learned over time.
156 
157  Detections from all 4 pipelines are considered for overlap and quality (raggedness of their outlines), and only the
158  cleanest of several overlapping detections is preserved. From those cleanest detections, pipelines 2-3 learn and
159  adapt the HSV range for future video frames.
160 
161  Using this module
162  -----------------
163 
164  Check out [this tutorial](http://jevois.org/tutorials/UserFirstVision.html).
165 
166  Detection and quality control steps
167  -----------------------------------
168 
169  The following messages appear for each of the 4 pipelines, at the bottom of the demo video, to help users figure out
170  why their object may not be detected:
171 
172  - T0 to T3: thread (pipeline) number
173  - H=..., S=..., V=...: HSV range considered by that thread
174  - N=...: number of raw blobs detected in that range
175  - Because N blobs are considered in each thread from this point on, information about only the one that progressed
176  the farthest through a series of tests is shown. One letter is added each time a test is passed:
177  + H: the convex hull of the blob is quadrilateral (4 vertices)
178  + A: hull area is within range specified by parameter \p hullarea
179  + F: object to hull fill ratio is below the limit set by parameter \p hullfill (i.e., object is not a solid,
180  filled quadrilateral shape)
181  + S: the object has 8 vertices after shape smoothing to eliminate small shape defects (a U shape is
182  indeed expected to have 8 vertices).
183  + E: the shape discrepancy between the original shape and the smoothed shape is acceptable per parameter
184  \p ethresh, i.e., the original contour did not have a lot of defects.
185  + M: the shape is not too close to the borders of the image, per parameter \p margin, i.e., it is unlikely to
186  be truncated as the object partially exits the camera's field of view.
187  + V: vectors describing the shape as it relates to its convex hull are non-zero, i.e., the centroid of the shape
188  is not exactly coincident with the centroid of its convex hull, as we would expect for a U shape.
189  + U: the shape is roughly upright; upside-down U shapes are rejected as likely spurious.
190  + OK: this thread detected at least one shape that passed all the tests.
191 
192  The black and white picture at right shows the pixels in HSV range for the thread determined by parameter \p
193  showthread (with value 0 by default).
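 For example, a thread for which at least one blob passed every test might show the (illustrative) status
 line <code>T0: H=030-060 S=030-255 V=180-255 N=002 HAFSEMVU OK</code>.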
194 
195  Serial Messages
196  ---------------
197 
198  This module can send standardized serial messages as described in \ref UserSerialStyle. One message is issued on
199  every video frame for each detected and good object. The \p id field in the messages simply is \b FIRST for all
200  messages.
201 
202  When \p dopose is turned on, 3D messages will be sent, otherwise 2D messages.
203 
204  2D messages when \p dopose is off:
205 
206  - Serial message type: \b 2D
207  - `id`: always `FIRST`
208  - `x`, `y`, or vertices: standardized 2D coordinates of object center or corners
209  - `w`, `h`: standardized object size
210  - `extra`: none (empty string)
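 For instance, with the \p serstyle parameter set to \b Normal, a 2D message could look like the following
 (illustrative values; see \ref UserSerialStyle for the exact fields sent by each serial style):

 \verbatim
 N2 FIRST -238 57 312 186
 \endverbatim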
211 
212  3D messages when \p dopose is on:
213 
214  - Serial message type: \b 3D
215  - `id`: always `FIRST`
216  - `x`, `y`, `z`, or vertices: 3D coordinates in millimeters of object center, or corners
217  - `w`, `h`, `d`: object size in millimeters; a depth of 1mm is always used
218  - `extra`: none (empty string)
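 Similarly, a 3D message in \b Normal style could look like this (again, illustrative values only; consult
 \ref UserSerialStyle for the authoritative field list):

 \verbatim
 N3 FIRST 127 -45 1050 280 175 1
 \endverbatim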
219 
220  NOTE: 3D pose estimation from low-resolution 176x144 images at 120fps can be quite noisy. Make sure you tune your
221  HSV ranges very well if you want to operate at 120fps (see below). To operate more reliably at very low resolutions,
222  one may want to improve this module by adding subpixel shape refinement and tracking across frames.
223 
224  Trying it out
225  -------------
226 
227  The default parameter settings (which are set in \b script.cfg explained below) attempt to detect yellow-green
228  objects. Present an object to the JeVois camera and see whether it is detected. When detected and good
229  enough according to a number of quality control tests, the outline of the object is drawn.
230 
231  For further use of this module, you may want to check out the following tutorials:
232 
233  - [Using the sample FIRST Robotics vision module](http://jevois.org/tutorials/UserFirstVision.html)
234  - [Tuning the color-based object tracker using a python graphical
235  interface](http://jevois.org/tutorials/UserColorTracking.html)
236  - [Making a motorized pan-tilt head for JeVois and tracking
237  objects](http://jevois.org/tutorials/UserPanTilt.html)
238  - \ref ArduinoTutorial
239 
240  Tuning
241  ------
242 
243  You need to provide the exact width and height of your physical shape to parameter \p objsize for this module to
244  work. It will look for a shape of that physical size (though at any distance and orientation from the camera). Be
245  sure you edit \b script.cfg and set the parameter \p objsize in there to the true measured physical size of your
246  shape.
247 
248  You should adjust parameters \p hcue, \p scue, and \p vcue to isolate the range of Hue, Saturation, and Value
249  (respectively) that correspond to the objects you want to detect. Note that there is a \b script.cfg file in this
250  module's directory that provides a range tuned to a light yellow-green object, as shown in the demo screenshot.
251 
252  Tuning the parameters is best done interactively by connecting to your JeVois camera while it is looking at some
253  object of the desired color. Once you have achieved a tuning, you may want to set the hcue, scue, and vcue
254  parameters in your \b script.cfg file for this module on the microSD card (see below).
255 
256  Typically, you would start by narrowing down on the hue, then the value, and finally the saturation. Make sure you
257  also move your camera around and show it typical background clutter to check for false positives (detections of
258  things you are not interested in, which can happen if your ranges are too wide).
259 
260  Config file
261  -----------
262 
263  JeVois allows you to store parameter settings and commands in a file named \b script.cfg stored in the directory of
264  a module. The file \b script.cfg may contain any sequence of commands as you would type them interactively in the
265  JeVois command-line interface. For the \jvmod{FirstVision} module, a default script is provided that sets the camera
266  to manual color, gain, and exposure mode (for more reliable color values), and other example parameter values.
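 For illustration, such a script could contain commands along these lines (the actual \b script.cfg shipped
 with the module is the reference; the values below are made up):

 \verbatim
 setcam autoexp 1        # manual exposure mode
 setcam absexp 500       # exposure time
 setcam autowb 0         # manual white balance
 setcam autogain 0       # manual gain
 setpar hcue 45          # initial hue cue
 setpar scue 50          # initial saturation lower bound
 setpar vcue 200         # initial value (brightness) lower bound
 \endverbatim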
267 
268  The \b script.cfg file for \jvmod{FirstVision} is stored on your microSD at
269  <b>JEVOIS:/modules/JeVois/FirstVision/script.cfg</b>
270 
271  @author Laurent Itti
272 
273  @videomapping YUYV 176 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
274  @videomapping YUYV 352 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
275  @videomapping YUYV 320 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
276  @videomapping YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
277  @videomapping NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstVision
278  @videomapping NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstVision
279  @email itti\@usc.edu
280  @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
281  @copyright Copyright (C) 2017 by Laurent Itti, iLab and the University of Southern California
282  @mainurl http://jevois.org
283  @supporturl http://jevois.org/doc
284  @otherurl http://iLab.usc.edu
285  @license GPL v3
286  @distribution Unrestricted
287  @restrictions None
288  \ingroup modules */
289 class FirstVision : public jevois::StdModule,
290  public jevois::Parameter<hcue, scue, vcue, maxnumobj, hullarea, hullfill, erodesize,
291  dilatesize, epsilon, debug, threads, showthread, ethresh,
292  dopose, camparams, iou, objsize, margin>
293 {
294  protected:
295  cv::Mat itsCamMatrix; //!< Our camera matrix
296  cv::Mat itsDistCoeffs; //!< Our camera distortion coefficients
297  bool itsCueChanged = true; //!< True when users change ranges
298 
299  void onParamChange(hcue const & param, unsigned char const & newval) { itsCueChanged = true; }
300  void onParamChange(scue const & param, unsigned char const & newval) { itsCueChanged = true; }
301  void onParamChange(vcue const & param, unsigned char const & newval) { itsCueChanged = true; }
302 
303  // ####################################################################################################
304  //! Helper struct for an HSV range triplet, where each range is specified as a mean and sigma:
305  /*! Note that sigma is used differently for H, S, and V, under the assumption that we want to track a bright target:
306  For H, the range is [mean-sigma .. mean+sigma]. For S and V, the range is [mean-sigma .. 255]. See rmin() and
307  rmax() for details. */
308  struct hsvcue
309  {
310  //! Constructor
311  hsvcue(unsigned char h, unsigned char s, unsigned char v) : muh(h), sih(30), mus(s), sis(20), muv(v), siv(20)
312  { fix(); }
313 
314  //! Constructor
315  hsvcue(unsigned char h, unsigned char hsig, unsigned char s, unsigned char ssig,
316  unsigned char v, unsigned char vsig) : muh(h), sih(hsig), mus(s), sis(ssig), muv(v), siv(vsig)
317  { fix(); }
318 
319  //! Fix ranges so they don't go out of bounds
320  void fix()
321  {
322  muh = std::min(179.0F, std::max(1.0F, muh)); sih = std::max(1.0F, std::min(sih, 360.0F));
323  mus = std::min(254.0F, std::max(1.0F, mus)); sis = std::max(1.0F, std::min(sis, 512.0F));
324  muv = std::min(254.0F, std::max(1.0F, muv)); siv = std::max(1.0F, std::min(siv, 512.0F));
325  }
326 
327  //! Get minimum triplet for use by cv::inRange()
328  cv::Scalar rmin() const
329  { return cv::Scalar(std::max(0.0F, muh - sih), std::max(0.0F, mus - sis), std::max(0.0F, muv - siv)); }
330 
331  //! Get maximum triplet for use by cv::inRange()
332  cv::Scalar rmax() const
333  { return cv::Scalar(std::min(179.0F, muh + sih), 255, 255); }
334 
335  float muh, sih; //!< Mean and sigma for H
336  float mus, sis; //!< Mean and sigma for S
337  float muv, siv; //!< Mean and sigma for V
338  };
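  // Usage sketch (this is what detect() does below): build a cue, then threshold with cv::inRange(), e.g.:
  //   hsvcue cue(45, 50, 200);                           // hue ~45, saturation >= ~50, value >= ~200
  //   cv::inRange(imghsv, cue.rmin(), cue.rmax(), mask); // binary mask of pixels within the cue's range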
339 
340  std::vector<hsvcue> itsHSV;
341 
342  // ####################################################################################################
343  //! Helper struct for a detected object
344  struct detection
345  {
346  std::vector<cv::Point> contour; //!< The full detailed contour
347  std::vector<cv::Point> approx; //!< Smoothed approximation of the contour
348  std::vector<cv::Point> hull; //!< Convex hull of the contour
349  size_t threadnum; //!< Thread number that detected this object
350  float serr; //!< Shape error score (higher for rougher contours with defects)
351  };
352 
353  //! Our detections, combined across all threads
354  std::vector<detection> itsDetections;
355  std::mutex itsDetMtx;
356 
357  //! Kalman filters to learn and adapt HSV windows over time
358  std::shared_ptr<Kalman1D> itsKalH, itsKalS, itsKalV;
359 
360  //! Erosion and dilation kernels shared across all detect threads
361  cv::Mat itsErodeElement, itsDilateElement;
362 
363  // ####################################################################################################
364  //! ParallelLoopBody class for parallelizing single-marker pose estimation
365  /*! Derived from the opencv_contrib ArUco module; it is just a simple solvePnP inside. */
366  class SinglePoseEstimationParallel : public cv::ParallelLoopBody
367  {
368  public:
369  SinglePoseEstimationParallel(cv::Mat & _objPoints, cv::InputArrayOfArrays _corners,
370  cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
371  cv::Mat & _rvecs, cv::Mat & _tvecs) :
372  objPoints(_objPoints), corners(_corners), cameraMatrix(_cameraMatrix),
373  distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs)
374  { }
375 
376  void operator()(cv::Range const & range) const
377  {
378  int const begin = range.start;
379  int const end = range.end;
380 
381  for (int i = begin; i < end; ++i)
382  cv::solvePnP(objPoints, corners.getMat(i), cameraMatrix, distCoeffs,
383  rvecs.at<cv::Vec3d>(i), tvecs.at<cv::Vec3d>(i));
384  }
385 
386  private:
387  cv::Mat & objPoints;
388  cv::InputArrayOfArrays corners;
389  cv::InputArray cameraMatrix, distCoeffs;
390  cv::Mat & rvecs, tvecs; // note: rvecs is a reference; tvecs is a shallow copy that shares the caller's pixel data
391  };
392 
393  // ####################################################################################################
394  // ####################################################################################################
395  // ####################################################################################################
396 
397  public:
398  // ####################################################################################################
399  //! Constructor
400  FirstVision(std::string const & instance) : jevois::StdModule(instance)
401  {
402  itsKalH = addSubComponent<Kalman1D>("kalH");
403  itsKalS = addSubComponent<Kalman1D>("kalS");
404  itsKalV = addSubComponent<Kalman1D>("kalV");
405  }
406 
407  // ####################################################################################################
408  //! Virtual destructor for safe inheritance
409  virtual ~FirstVision() { }
410 
411  // ####################################################################################################
412  //! Estimate 6D pose of detected objects, if dopose parameter is true, otherwise just 2D corners
413  /*! Inspired by the ArUco module of opencv_contrib.
414  The corners array is always filled, but rvecs and tvecs are only filled if dopose is true. */
415  void estimatePose(std::vector<std::vector<cv::Point2f> > & corners, cv::OutputArray _rvecs,
416  cv::OutputArray _tvecs)
417  {
418  auto const osiz = objsize::get();
419 
420  // Get a vector of all our corners so we can map them to 3D and draw them:
421  corners.clear();
422  for (detection const & d : itsDetections)
423  {
424  corners.push_back(std::vector<cv::Point2f>());
425  std::vector<cv::Point2f> & v = corners.back();
426  for (auto const & p : d.hull) v.push_back(cv::Point2f(p));
427  }
428 
429  if (dopose::get())
430  {
431  // set coordinate system in the middle of the object, with Z pointing out
432  cv::Mat objPoints(4, 1, CV_32FC3);
433  objPoints.ptr< cv::Vec3f >(0)[0] = cv::Vec3f(-osiz.width * 0.5F, -osiz.height * 0.5F, 0);
434  objPoints.ptr< cv::Vec3f >(0)[1] = cv::Vec3f(-osiz.width * 0.5F, osiz.height * 0.5F, 0);
435  objPoints.ptr< cv::Vec3f >(0)[2] = cv::Vec3f(osiz.width * 0.5F, osiz.height * 0.5F, 0);
436  objPoints.ptr< cv::Vec3f >(0)[3] = cv::Vec3f(osiz.width * 0.5F, -osiz.height * 0.5F, 0);
437 
438  int nobj = (int)corners.size();
439  _rvecs.create(nobj, 1, CV_64FC3); _tvecs.create(nobj, 1, CV_64FC3);
440  cv::Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
441  cv::parallel_for_(cv::Range(0, nobj), SinglePoseEstimationParallel(objPoints, corners, itsCamMatrix,
442  itsDistCoeffs, rvecs, tvecs));
443  }
444  }
445 
446  // ####################################################################################################
447  //! Load camera calibration parameters
448  void loadCameraCalibration(unsigned int w, unsigned int h)
449  {
450  camparams::freeze(); // freeze the parameter, since changing it after this point would not be honored
451 
452  std::string const cpf = std::string(JEVOIS_SHARE_PATH) + "/camera/" + camparams::get() +
453  std::to_string(w) + 'x' + std::to_string(h) + ".yaml";
454 
455  cv::FileStorage fs(cpf, cv::FileStorage::READ);
456  if (fs.isOpened())
457  {
458  fs["camera_matrix"] >> itsCamMatrix;
459  fs["distortion_coefficients"] >> itsDistCoeffs;
460  LINFO("Loaded camera calibration from " << cpf);
461  }
462  else LFATAL("Failed to read camera parameters from file [" << cpf << "]");
463  }
464 
465  // ####################################################################################################
466  //! HSV object detector, we run several of those in parallel with different hsvcue settings
467  void detect(cv::Mat const & imghsv, size_t tnum, int dispx = 3, int dispy = 242, jevois::RawImage *outimg = nullptr)
468  {
469  // Threshold the HSV image to only keep pixels within the desired HSV range:
470  cv::Mat imgth;
471  hsvcue const & hsv = itsHSV[tnum]; cv::Scalar const rmin = hsv.rmin(), rmax = hsv.rmax();
472  cv::inRange(imghsv, rmin, rmax, imgth);
473  std::string str = jevois::sformat("T%zu: H=%03d-%03d S=%03d-%03d V=%03d-%03d ", tnum, int(rmin.val[0]),
474  int(rmax.val[0]), int(rmin.val[1]), int(rmax.val[1]),
475  int(rmin.val[2]), int(rmax.val[2]));
476 
477  // Apply morphological operations to cleanup the image noise:
478  if (itsErodeElement.empty() == false) cv::erode(imgth, imgth, itsErodeElement);
479  if (itsDilateElement.empty() == false) cv::dilate(imgth, imgth, itsDilateElement);
480 
481  // Detect objects by finding contours:
482  std::vector<std::vector<cv::Point> > contours; std::vector<cv::Vec4i> hierarchy;
483  cv::findContours(imgth, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
484  str += jevois::sformat("N=%03zu ", hierarchy.size()); // %zu since size() is a size_t
485 
486  double const epsi = epsilon::get();
487  int const m = margin::get();
488 
489  // Identify the "good" objects:
490  std::string str2, beststr2;
491  if (hierarchy.size() > 0 && hierarchy.size() <= maxnumobj::get())
492  {
493  for (int index = 0; index >= 0; index = hierarchy[index][0])
494  {
495  // Keep track of our best detection so far:
496  if (str2.length() > beststr2.length()) beststr2 = str2;
497  str2.clear();
498 
499  // Let's examine this contour:
500  std::vector<cv::Point> const & c = contours[index];
501  detection d;
502 
503  // Compute contour area:
504  double const area = cv::contourArea(c, false);
505 
506  // Compute convex hull:
507  std::vector<cv::Point> rawhull;
508  cv::convexHull(c, rawhull, true);
509  double const rawhullperi = cv::arcLength(rawhull, true);
510  cv::approxPolyDP(rawhull, d.hull, epsi * rawhullperi * 3.0, true);
511 
512  // Is it the right shape?
513  if (d.hull.size() != 4) continue; // 4 vertices for the rectangular convex outline (shows as a trapezoid)
514  str2 += "H"; // Hull is quadrilateral
515 
516  double const huarea = cv::contourArea(d.hull, false);
517  if ( ! hullarea::get().contains(int(huarea + 0.4999))) continue;
518  str2 += "A"; // Hull area ok
519 
520  int const hufill = int(area / huarea * 100.0 + 0.4999);
521  if (hufill > hullfill::get()) continue;
522  str2 += "F"; // Fill is ok
523 
524  // Check object shape:
525  double const peri = cv::arcLength(c, true);
526  cv::approxPolyDP(c, d.approx, epsi * peri, true);
527  if (d.approx.size() < 7 || d.approx.size() > 9) continue; // 8 vertices for a U shape
528  str2 += "S"; // Shape is ok
529 
530  // Compute contour serr:
531  d.serr = 100.0 * cv::matchShapes(c, d.approx, cv::CONTOURS_MATCH_I1, 0.0);
532  if (d.serr > ethresh::get()) continue;
533  str2 += "E"; // Shape error is ok
534 
535  // Reject the shape if any of its vertices gets within the margin of the image bounds. This is to avoid
536  // getting grossly incorrect 6D pose estimates as the shape starts getting truncated as it partially exits the
537  // camera field of view:
538  bool reject = false;
539  for (size_t i = 0; i < c.size(); ++i)
540  if (c[i].x < m || c[i].x >= imghsv.cols - m || c[i].y < m || c[i].y >= imghsv.rows - m)
541  { reject = true; break; }
542  if (reject) continue;
543  str2 += "M"; // Margin ok
544 
545  // Re-order the 4 points in the hull if needed: In the pose estimation code, we will assume vertices ordered
546  // as follows:
547  //
548  //   0|          |3
549  //    |          |
550  //    |          |
551  //    1----------2
552 
553  // v10+v23 should be pointing outward the U more than v03+v12 is:
554  std::complex<float> v10p23(float(d.hull[0].x - d.hull[1].x + d.hull[3].x - d.hull[2].x),
555  float(d.hull[0].y - d.hull[1].y + d.hull[3].y - d.hull[2].y));
556  float const len10p23 = std::abs(v10p23);
557  std::complex<float> v03p12(float(d.hull[3].x - d.hull[0].x + d.hull[2].x - d.hull[1].x),
558  float(d.hull[3].y - d.hull[0].y + d.hull[2].y - d.hull[1].y));
559  float const len03p12 = std::abs(v03p12);
560 
561  // Vector from centroid of U shape to centroid of its hull should also point outward of the U:
562  cv::Moments const momC = cv::moments(c);
563  cv::Moments const momH = cv::moments(d.hull);
564  std::complex<float> vCH(momH.m10 / momH.m00 - momC.m10 / momC.m00, momH.m01 / momH.m00 - momC.m01 / momC.m00);
565  float const lenCH = std::abs(vCH);
566 
567  if (len10p23 < 0.1F || len03p12 < 0.1F || lenCH < 0.1F) continue;
568  str2 += "V"; // Shape vectors ok
569 
570  float const good = (v10p23.real() * vCH.real() + v10p23.imag() * vCH.imag()) / (len10p23 * lenCH);
571  float const bad = (v03p12.real() * vCH.real() + v03p12.imag() * vCH.imag()) / (len03p12 * lenCH);
572 
573  // We reject upside-down detections as those are likely to be spurious:
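  // (Image y coordinates grow downward, so for an upright U the hull centroid, which is pulled toward the
  // open top of the U, lies above the shape centroid, making the y (imaginary) part of vCH negative.)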
574  if (vCH.imag() >= -2.0F) continue;
575  str2 += "U"; // U shape is upright
576 
577  // Fixup the ordering of the vertices if needed:
578  if (bad > good) { d.hull.insert(d.hull.begin(), d.hull.back()); d.hull.pop_back(); }
579 
580  // This detection is a keeper:
581  str2 += " OK";
582  d.contour = c;
583  std::lock_guard<std::mutex> _(itsDetMtx);
584  itsDetections.push_back(d);
585  }
586  if (str2.length() > beststr2.length()) beststr2 = str2;
587  }
588 
589  // Display any results requested by the users:
590  if (outimg && outimg->valid())
591  {
592  if (tnum == showthread::get() && outimg->width == 2 * imgth.cols)
593  jevois::rawimage::pasteGreyToYUYV(imgth, *outimg, imgth.cols, 0);
594  jevois::rawimage::writeText(*outimg, str + beststr2, dispx, dispy + 12*tnum, jevois::yuyv::White);
595  }
596  }
597 
598  // ####################################################################################################
599  //! Initialize (e.g., if user changes cue params) or update our HSV detection ranges
600  void updateHSV(size_t nthreads)
601  {
602  float const spread = 0.2F;
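  // Each successive thread widens its HSV acceptance window by an extra 20% below (e.g., thread 1 uses
  // 1.2x the base sigmas and thread 3 uses 1.6x), so the threads sample nested ranges around the cue: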
603 
604  if (itsHSV.empty() || itsCueChanged)
605  {
606  // Initialize or reset because of user parameter change:
607  itsHSV.clear(); itsCueChanged = false;
608  for (size_t i = 0; i < nthreads; ++i)
609  {
610  hsvcue cue(hcue::get(), scue::get(), vcue::get());
611  cue.sih *= (1.0F + spread * i); cue.sis *= (1.0F + spread * i); cue.siv *= (1.0F + spread * i);
612  cue.fix();
613  itsHSV.push_back(cue);
614  }
615  if (nthreads > 2)
616  {
617  itsKalH->set(hcue::get()); itsKalH->get();
618  itsKalS->set(scue::get()); itsKalS->get();
619  itsKalV->set(vcue::get()); itsKalV->get();
620  }
621  }
622  else
623  {
624  // Kalman update:
625  if (nthreads > 2)
626  {
627  itsHSV[2].muh = itsKalH->get();
628  itsHSV[2].mus = itsKalS->get();
629  itsHSV[2].muv = itsKalV->get();
630  itsHSV[2].fix();
631  for (size_t i = 3; i < itsHSV.size(); ++i)
632  {
633  itsHSV[i] = itsHSV[2];
634  itsHSV[i].sih *= (1.0F + spread * i);
635  itsHSV[i].sis *= (1.0F + spread * i);
636  itsHSV[i].siv *= (1.0F + spread * i);
637  itsHSV[i].fix();
638  }
639  }
640  }
641  }
642 
643  // ####################################################################################################
644  //! Clean up the detections by eliminating duplicates:
645  void cleanupDetections()
646  {
647  bool keepgoing = true;
648  double const iouth = iou::get();
649 
650  while (keepgoing)
651  {
652  // We will stop if we do not eliminate any more objects:
653  keepgoing = false; int delidx = -1;
654 
655  // Loop over all pairs of objects:
656  size_t const siz = itsDetections.size();
657  for (size_t i = 0; i < siz; ++i)
658  {
659  for (size_t j = 0; j < i; ++j)
660  {
661  std::vector<cv::Point> pts = itsDetections[i].hull;
662  for (cv::Point const & p : itsDetections[j].hull) pts.push_back(p);
663  std::vector<cv::Point> hull;
664  cv::convexHull(pts, hull); // FIXME should do a true union! this is just an approximation to it
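  // By inclusion-exclusion, intersection = area(i) + area(j) - union, with the union area approximated
  // by the convex hull just computed; the hull can only overestimate the union, which underestimates IoU: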
665  double uarea = cv::contourArea(hull);
666  double iarea = cv::contourArea(itsDetections[i].hull) + cv::contourArea(itsDetections[j].hull) - uarea;
667 
668  // note: object detection code guarantees non-zero area:
669  double const inoun = iarea / uarea;
670  if (inoun >= iouth)
671  {
672  if (itsDetections[i].serr > itsDetections[j].serr) delidx = j; else delidx = i;
673  break;
674  }
675  }
676  if (delidx != -1) break;
677  }
678  if (delidx != -1) { itsDetections.erase(itsDetections.begin() + delidx); keepgoing = true; }
679  }
680  }
681 
682  // ####################################################################################################
683  //! Learn and update our HSV ranges
684  void learnHSV(size_t nthreads, cv::Mat const & imgbgr, jevois::RawImage *outimg = nullptr)
685  {
686  int const w = imgbgr.cols, h = imgbgr.rows;
687 
688  // Compute the median filtered BGR image in a thread:
689  cv::Mat medimgbgr;
690  auto median_fut = std::async(std::launch::async, [&](){ cv::medianBlur(imgbgr, medimgbgr, 3); } );
691 
692  // Get all the cleaned-up contours:
693  std::vector<std::vector<cv::Point> > contours;
694  for (detection const & d : itsDetections) contours.push_back(d.contour);
695 
696  // If desired, draw all contours:
697  std::future<void> drawc_fut;
698  if (debug::get() && outimg && outimg->valid())
699  drawc_fut = std::async(std::launch::async, [&]() {
700  // We reinterpret the top portion of our YUYV output image as an opencv 8UC2 image:
701  cv::Mat outuc2(outimg->height, outimg->width, CV_8UC2, outimg->pixelsw<unsigned char>());
702  cv::drawContours(outuc2, contours, -1, jevois::yuyv::LightPink, 2);
703  } );
704 
705  // Draw all the filled contours into a binary mask image:
706  cv::Mat mask(h, w, CV_8UC1, (unsigned char)0);
707  cv::drawContours(mask, contours, -1, 255, -1); // last -1 is for filled
708 
709  // Wait until median filter is done:
710  median_fut.get();
711 
712  // Compute mean and std BGR values inside objects:
713  cv::Mat mean, std;
714  cv::meanStdDev(medimgbgr, mean, std, mask);
715 
716  // Convert to HSV:
717  cv::Mat bgrmean(2, 1, CV_8UC3); bgrmean.at<cv::Vec3b>(0, 0) = mean; bgrmean.at<cv::Vec3b>(1, 0) = std;
718  cv::Mat hsvmean; cv::cvtColor(bgrmean, hsvmean, cv::COLOR_BGR2HSV);
719 
720  cv::Vec3b hsv = hsvmean.at<cv::Vec3b>(0, 0);
721  int H = hsv.val[0], S = hsv.val[1], V = hsv.val[2];
722 
723  cv::Vec3b sighsv = hsvmean.at<cv::Vec3b>(1, 0);
724  int sH = sighsv.val[0], sS = sighsv.val[1], sV = sighsv.val[2];
725 
726  // Set the new measurements:
727  itsKalH->set(H); itsKalS->set(S); itsKalV->set(V);
728 
729  if (nthreads > 2)
730  {
731  float const eta = 0.4F;
732  itsHSV[2].sih = (1.0F - eta) * itsHSV[2].sih + eta * sH;
733  itsHSV[2].sis = (1.0F - eta) * itsHSV[2].sis + eta * sS;
734  itsHSV[2].siv = (1.0F - eta) * itsHSV[2].siv + eta * sV;
735  itsHSV[2].fix();
736  }
737 
738  // note: drawc_fut may block us here until it is complete.
739  }
740 
741  // ####################################################################################################
742  //! Send serial messages about each detection:
743  void sendAllSerial(int w, int h, std::vector<std::vector<cv::Point2f> > const & corners,
744  std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
745  {
746  if (rvecs.empty() == false)
747  {
748  // If we have rvecs and tvecs, we are doing 3D pose estimation, so send a 3D message:
749  auto const osiz = objsize::get();
750  for (size_t i = 0; i < corners.size(); ++i)
751  {
752  std::vector<cv::Point2f> const & curr = corners[i];
753  cv::Vec3d const & rv = rvecs[i];
754  cv::Vec3d const & tv = tvecs[i];
755 
756  // Compute quaternion:
757  float const theta = std::sqrt(rv[0] * rv[0] + rv[1] * rv[1] + rv[2] * rv[2]);
758  Eigen::Vector3f axis(rv[0], rv[1], rv[2]); if (theta > 0.0F) axis /= theta; // AngleAxis needs a unit axis
759  Eigen::Quaternion<float> q(Eigen::AngleAxis<float>(theta, axis));
760 
761  sendSerialStd3D(tv[0], tv[1], tv[2], // position
762  osiz.width, osiz.height, 1.0F, // size
763  q.w(), q.x(), q.y(), q.z(), // pose
764  "FIRST"); // FIRST robotics shape
765  }
766  }
767  else
768  {
769  // Send one 2D message per object:
770  for (size_t i = 0; i < corners.size(); ++i)
771  sendSerialContour2D(w, h, corners[i], "FIRST");
772  }
773  }
774 
775  // ####################################################################################################
776  //! Update the morphology structuring elements if needed
777  void updateStructuringElements()
778  {
779  int e = erodesize::get();
780  if (e != itsErodeElement.cols)
781  {
782  if (e) itsErodeElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(e, e));
783  else itsErodeElement.release();
784  }
785 
786  int d = dilatesize::get();
787  if (d != itsDilateElement.cols)
788  {
789  if (d) itsDilateElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(d, d));
790  else itsDilateElement.release();
791  }
792  }
793 
794  // ####################################################################################################
795  //! Processing function, no USB video output
796  virtual void process(jevois::InputFrame && inframe) override
797  {
798  static jevois::Timer timer("processing");
799 
800  // Wait for next available camera image. Any resolution ok:
801  jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
802 
803  timer.start();
804 
805  // Load camera calibration if needed:
806  if (itsCamMatrix.empty()) loadCameraCalibration(w, h);
807 
808  // Convert input image to BGR24, then to HSV:
809  cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
810  cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
811  size_t const nthreads = threads::get();
812 
813  // Make sure our HSV range parameters are up to date:
814  updateHSV(nthreads);
815 
816  // Clear any old detections and get ready to parallelize the detection work:
817  itsDetections.clear();
818  updateStructuringElements();
819 
820  // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
821  std::vector<std::future<void> > dfut;
822  for (size_t i = 0; i < nthreads - 1; ++i)
823  dfut.push_back(std::async(std::launch::async, [&](size_t tn) { detect(imghsv, tn, 3, h+2); }, i));
824  detect(imghsv, nthreads - 1, 3, h+2);
825 
826  // Wait for all threads to complete:
827  for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
828 
829  // Let camera know we are done processing the input image:
830  inframe.done();
831 
832  // Clean up the detections by eliminating duplicates:
833  cleanupDetections();
834 
835  // Learn the object's HSV value over time:
836  auto learn_fut = std::async(std::launch::async, [&]() { learnHSV(nthreads, imgbgr); });
837 
838  // Map to 6D (inverse perspective):
839  std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
840  estimatePose(corners, rvecs, tvecs);
841 
842  // Send all serial messages:
843  sendAllSerial(w, h, corners, rvecs, tvecs);
844 
845  // Wait for all threads:
846  try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
847 
848  // Show processing fps:
849  timer.stop();
850  }
851 
852  // ####################################################################################################
853  //! Processing function, with USB video output
854  virtual void process(jevois::InputFrame && inframe, jevois::OutputFrame && outframe) override
855  {
856  static jevois::Timer timer("processing");
857 
858  // Wait for next available camera image. Any resolution ok, but require YUYV since we assume it for drawings:
859  jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
860  inimg.require("input", w, h, V4L2_PIX_FMT_YUYV);
861 
862  timer.start();
863 
864  // Load camera calibration if needed:
865  if (itsCamMatrix.empty()) loadCameraCalibration(w, h);
866 
867  // While we process it, start a thread to wait for output frame and paste the input image into it:
868  jevois::RawImage outimg; // main thread should not use outimg until paste thread is complete
869  auto paste_fut = std::async(std::launch::async, [&]() {
870  outimg = outframe.get();
871  outimg.require("output", outimg.width, h + 50, inimg.fmt);
872  if (outimg.width != w && outimg.width != w * 2) LFATAL("Output image width should be 1x or 2x input width");
873  jevois::rawimage::paste(inimg, outimg, 0, 0);
874  jevois::rawimage::writeText(outimg, "JeVois FIRST Vision", 3, 3, jevois::yuyv::White);
875  jevois::rawimage::drawFilledRect(outimg, 0, h, outimg.width, outimg.height-h, jevois::yuyv::Black);
876  });
877 
878  // Convert input image to BGR24, then to HSV:
879  cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
880  cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
881  size_t const nthreads = threads::get();
882 
883  // Make sure our HSV range parameters are up to date:
884  updateHSV(nthreads);
885 
886  // Clear any old detections and get ready to parallelize the detection work:
887  itsDetections.clear();
888  updateStructuringElements();
889 
890  // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
891  std::vector<std::future<void> > dfut;
892  for (size_t i = 0; i < nthreads - 1; ++i)
893  dfut.push_back(std::async(std::launch::async, [&](size_t tn) { detect(imghsv, tn, 3, h+2, &outimg); }, i));
894  detect(imghsv, nthreads - 1, 3, h+2, &outimg);
895 
896  // Wait for all threads to complete:
897  for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
898 
899  // Wait for paste to finish up:
900  paste_fut.get();
901 
902  // Let camera know we are done processing the input image:
903  inframe.done();
904 
905  // Clean up the detections by eliminating duplicates:
906  cleanupDetections();
907 
908  // Learn the object's HSV value over time:
909  auto learn_fut = std::async(std::launch::async, [&]() { learnHSV(nthreads, imgbgr, &outimg); });
910 
911  // Map to 6D (inverse perspective):
912  std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
913  estimatePose(corners, rvecs, tvecs);
914 
915  // Send all serial messages:
916  sendAllSerial(w, h, corners, rvecs, tvecs);
917 
918  // Draw all detections in 3D:
919  drawDetections(outimg, corners, rvecs, tvecs);
920 
921  // Show number of detected objects:
922  jevois::rawimage::writeText(outimg, "Detected " + std::to_string(itsDetections.size()) + " objects.",
923  w + 3, 3, jevois::yuyv::White);
924 
925  // Wait for all threads:
926  try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
927 
928  // Show processing fps:
929  std::string const & fpscpu = timer.stop();
930  jevois::rawimage::writeText(outimg, fpscpu, 3, h - 13, jevois::yuyv::White);
931 
932  // Send the output image with our processing results to the host over USB:
933  outframe.send();
934  }
935 
936  // ####################################################################################################
937  void drawDetections(jevois::RawImage & outimg, std::vector<std::vector<cv::Point2f> > corners,
938  std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
939  {
940  auto const osiz = objsize::get(); float const w = osiz.width, h = osiz.height;
941  int nobj = int(corners.size());
942 
943  // This code is like drawDetectedMarkers() in cv::aruco, but for YUYV output image:
944  if (rvecs.empty())
945  {
946  // We are not doing 3D pose estimation. Just draw object outlines in 2D:
947  for (int i = 0; i < nobj; ++i)
948  {
949  std::vector<cv::Point2f> const & obj = corners[i];
950 
951  // draw marker sides:
952  for (int j = 0; j < 4; ++j)
953  {
954  cv::Point2f const & p0 = obj[j];
955  cv::Point2f const & p1 = obj[ (j+1) % 4 ];
956  jevois::rawimage::drawLine(outimg, int(p0.x + 0.5F), int(p0.y + 0.5F),
957  int(p1.x + 0.5F), int(p1.y + 0.5F), 1, jevois::yuyv::LightPink);
958  //jevois::rawimage::writeText(outimg, std::to_string(j),
959  // int(p0.x + 0.5F), int(p0.y + 0.5F), jevois::yuyv::White);
960  }
961  }
962  }
963  else
964  {
965  // Show trihedron and parallelepiped centered on object:
966  float const hw = w * 0.5F, hh = h * 0.5F, dd = -0.5F * std::max(w, h);
967 
968  for (int i = 0; i < nobj; ++i)
969  {
970  // Project axis points:
971  std::vector<cv::Point3f> axisPoints;
972  axisPoints.push_back(cv::Point3f(0.0F, 0.0F, 0.0F));
973  axisPoints.push_back(cv::Point3f(hw, 0.0F, 0.0F));
974  axisPoints.push_back(cv::Point3f(0.0F, hh, 0.0F));
975  axisPoints.push_back(cv::Point3f(0.0F, 0.0F, dd));
976 
977  std::vector<cv::Point2f> imagePoints;
978  cv::projectPoints(axisPoints, rvecs[i], tvecs[i], itsCamMatrix, itsDistCoeffs, imagePoints);
979 
980  // Draw axis lines:
981  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
982  int(imagePoints[1].x + 0.5F), int(imagePoints[1].y + 0.5F),
983  2, jevois::yuyv::MedPurple);
984  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
985  int(imagePoints[2].x + 0.5F), int(imagePoints[2].y + 0.5F),
986  2, jevois::yuyv::MedGreen);
987  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
988  int(imagePoints[3].x + 0.5F), int(imagePoints[3].y + 0.5F),
989  2, jevois::yuyv::MedGrey);
990 
991  // Also draw a parallelepiped:
992  std::vector<cv::Point3f> cubePoints;
993  cubePoints.push_back(cv::Point3f(-hw, -hh, 0.0F));
994  cubePoints.push_back(cv::Point3f(hw, -hh, 0.0F));
995  cubePoints.push_back(cv::Point3f(hw, hh, 0.0F));
996  cubePoints.push_back(cv::Point3f(-hw, hh, 0.0F));
997  cubePoints.push_back(cv::Point3f(-hw, -hh, dd));
998  cubePoints.push_back(cv::Point3f(hw, -hh, dd));
999  cubePoints.push_back(cv::Point3f(hw, hh, dd));
1000  cubePoints.push_back(cv::Point3f(-hw, hh, dd));
1001 
1002  std::vector<cv::Point2f> cuf;
1003  cv::projectPoints(cubePoints, rvecs[i], tvecs[i], itsCamMatrix, itsDistCoeffs, cuf);
1004 
1005  // Round all the coordinates:
1006  std::vector<cv::Point> cu;
1007  for (auto const & p : cuf) cu.push_back(cv::Point(int(p.x + 0.5F), int(p.y + 0.5F)));
1008 
1009  // Draw parallelepiped lines:
1010  jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[1].x, cu[1].y, 1, jevois::yuyv::LightGreen);
1011  jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[2].x, cu[2].y, 1, jevois::yuyv::LightGreen);
1012  jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[3].x, cu[3].y, 1, jevois::yuyv::LightGreen);
1013  jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[0].x, cu[0].y, 1, jevois::yuyv::LightGreen);
1014  jevois::rawimage::drawLine(outimg, cu[4].x, cu[4].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
1015  jevois::rawimage::drawLine(outimg, cu[5].x, cu[5].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
1016  jevois::rawimage::drawLine(outimg, cu[6].x, cu[6].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
1017  jevois::rawimage::drawLine(outimg, cu[7].x, cu[7].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
1018  jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
1019  jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
1020  jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
1021  jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
1022  }
1023  }
1024  }
1025 };
1026 
1027 // Allow the module to be loaded as a shared object (.so) file:
1028 JEVOIS_REGISTER_MODULE(FirstVision);
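
// Usage note: to run this module with video output over USB, one of the mappings listed in the
// @videomapping tags above must be present in JEVOIS:/config/videomappings.cfg on the microSD card, e.g.:
//   YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision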