JeVoisBase  1.10
JeVois Smart Embedded Machine Vision Toolkit Base Modules
FirstVision.C
1 // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 //
3 // JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
4 // California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
5 //
6 // This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
7 // redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
8 // Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9 // without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10 // License for more details. You should have received a copy of the GNU General Public License along with this program;
11 // if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12 //
13 // Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
14 // Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
15 // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
16 /*! \file */
17 
18 #include <jevois/Core/Module.H>
19 #include <jevois/Debug/Log.H>
20 #include <jevois/Util/Utils.H>
21 #include <jevois/Image/RawImageOps.H>
22 #include <jevois/Debug/Timer.H>
23 #include <jevoisbase/Components/Tracking/Kalman1D.H>
24 
25 #include <linux/videodev2.h> // for V4L2_PIX_FMT_YUYV
26 
27 #include <opencv2/core/core.hpp>
28 #include <opencv2/imgproc/imgproc.hpp>
29 #include <opencv2/calib3d/calib3d.hpp>
30 
31 #include <Eigen/Geometry> // for AngleAxis and Quaternion
32 
33 // REMINDER: make sure you understand the viral nature and terms of the above license. If you are writing code derived
34 // from this file, you must offer your source under the GPL license too.
35 
36 static jevois::ParameterCategory const ParamCateg("FirstVision Options");
37 
38 //! Parameter \relates FirstVision
39 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(hcue, unsigned char, "Initial cue for target hue (0=red/do not use because of "
40  "wraparound, 30=yellow, 45=light green, 60=green, 75=green cyan, 90=cyan, "
41  "105=light blue, 120=blue, 135=purple, 150=pink)",
42  45, jevois::Range<unsigned char>(0, 179), ParamCateg);
43 
44 //! Parameter \relates FirstVision
45 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(scue, unsigned char, "Initial cue for target saturation lower bound",
46  50, ParamCateg);
47 
48 //! Parameter \relates FirstVision
49 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(vcue, unsigned char, "Initial cue for target value (brightness) lower bound",
50  200, ParamCateg);
51 
52 //! Parameter \relates FirstVision
53 JEVOIS_DECLARE_PARAMETER(maxnumobj, size_t, "Max number of blobs for a frame to be considered clean. If more "
54  "blobs are detected in a frame, we skip that frame before we even try to analyze the shapes of the blobs",
55  100, ParamCateg);
56 
57 //! Parameter \relates FirstVision
58 JEVOIS_DECLARE_PARAMETER(hullarea, jevois::Range<unsigned int>, "Range of object area (in pixels) to track. Use this "
59  "if you want to skip shape analysis of very large or very small blobs",
60  jevois::Range<unsigned int>(20*20, 300*300), ParamCateg);
61 
62 //! Parameter \relates FirstVision
63 JEVOIS_DECLARE_PARAMETER(hullfill, int, "Max fill ratio of the convex hull (percent). Lower values mean your shape "
64  "occupies a smaller fraction of its convex hull. This parameter sets an upper bound; "
65  "fuller shapes will be rejected.",
66  50, jevois::Range<int>(1, 100), ParamCateg);
67 
68 //! Parameter \relates FirstVision
69 JEVOIS_DECLARE_PARAMETER(erodesize, size_t, "Erosion structuring element size (pixels), or 0 for no erosion",
70  2, ParamCateg);
71 
72 //! Parameter \relates FirstVision
73 JEVOIS_DECLARE_PARAMETER(dilatesize, size_t, "Dilation structuring element size (pixels), or 0 for no dilation",
74  4, ParamCateg);
75 
76 //! Parameter \relates FirstVision
77 JEVOIS_DECLARE_PARAMETER(epsilon, double, "Shape smoothing factor (higher for smoother). Shape smoothing is applied "
78  "to remove small contour defects before the shape is analyzed.",
79  0.015, jevois::Range<double>(0.001, 0.999), ParamCateg);
80 
81 //! Parameter \relates FirstVision
82 JEVOIS_DECLARE_PARAMETER(debug, bool, "Show contours of all object candidates if true",
83  false, ParamCateg);
84 
85 //! Parameter \relates FirstVision
86 JEVOIS_DECLARE_PARAMETER(threads, size_t, "Number of parallel vision processing threads. Thread 0 uses the HSV values "
87  "provided by user parameters; thread 1 broadens that fixed range a bit; threads 2-3 use "
88  "a narrower and a broader learned HSV window, adapted over time",
89  4, jevois::Range<size_t>(2, 4), ParamCateg);
90 
91 //! Parameter \relates FirstVision
92 JEVOIS_DECLARE_PARAMETER(showthread, size_t, "Thread number that is used to display HSV-thresholded image",
93  0, jevois::Range<size_t>(0, 3), ParamCateg);
94 
95 //! Parameter \relates FirstVision
96 JEVOIS_DECLARE_PARAMETER(ethresh, double, "Shape error threshold (lower is stricter for exact shape)",
97  900.0, jevois::Range<double>(0.01, 1000.0), ParamCateg);
98 
99 //! Parameter \relates FirstVision
100 JEVOIS_DECLARE_PARAMETER(dopose, bool, "Compute (and show) 6D object pose; requires a valid camera calibration. "
101  "When dopose is true, 3D serial messages are sent out, otherwise 2D serial messages.",
102  true, ParamCateg);
103 
104 //! Parameter \relates FirstVision
105 JEVOIS_DECLARE_PARAMETER(camparams, std::string, "File stem of camera parameters, or empty. Camera resolution "
106  "will be appended, as well as a .yaml extension. For example, specifying 'calibration' "
107  "here and running the camera sensor at 320x240 will attempt to load "
108  "calibration320x240.yaml from within directory " JEVOIS_SHARE_PATH "/camera/",
109  "calibration", ParamCateg);
110 
111 //! Parameter \relates FirstVision
112 JEVOIS_DECLARE_PARAMETER(iou, double, "Intersection-over-union ratio over which duplicates are eliminated",
113  0.3, jevois::Range<double>(0.01, 0.99), ParamCateg);
114 
115 //! Parameter \relates FirstVision
116 JEVOIS_DECLARE_PARAMETER(objsize, cv::Size_<float>, "Object size (in meters)",
117  cv::Size_<float>(0.28F, 0.175F), ParamCateg);
118 
119 //! Parameter \relates FirstVision
120 JEVOIS_DECLARE_PARAMETER(margin, size_t, "Margin from frame borders (pixels). If any corner of a detected shape "
121  "gets closer than the margin to the frame borders, the shape will be rejected. This is to "
122  "avoid possibly bogus 6D pose estimation when the shape starts getting truncated as it "
123  "partially exits the camera's field of view.",
124  5, ParamCateg);
125 
126 //! Simple color-based detection of a U-shaped object for FIRST Robotics
127 /*! This module isolates pixels within a given HSV range (hue, saturation, and value of color pixels), does some
128  cleanup, and extracts object contours. It looks for a rectangular U shape of a specific size (set by parameter
129  \p objsize). See screenshots for an example of shape. It sends information about detected objects over serial.
130 
131  This module usually works best with the camera sensor set to manual exposure, manual gain, manual color balance,
132  etc., so that HSV color values are reliable. See the \b script.cfg file in this module's directory for an example of
133  how to set the camera settings each time this module is loaded.
134 
135  This code was loosely inspired by the JeVois \jvmod{ObjectTracker} module. Also see \jvmod{FirstPython} for a
136  simplified version of this module, written in Python.
137 
138  This module is provided for inspiration. It has no pretension of actually solving the FIRST Robotics vision problem
139  in a complete and reliable way. It is released in the hope that FRC teams will try it out and get inspired to
140  develop something much better for their own robot.
141 
142  General pipeline
143  ----------------
144 
145  The basic idea of this module is the classic FIRST Robotics vision pipeline: first, select the range of HSV pixel
146  values likely to belong to the object. Then, detect the contours of all blobs in range. Then apply some tests to
147  the shape of the detected blobs: their size, fill ratio (ratio of object area to its convex hull's area), etc.
148  Finally, estimate the location and pose of the object in the world.
149 
150  In this module, we run up to 4 pipelines in parallel, using different settings for the range of HSV pixels
151  considered:
152 
153  - Pipeline 0 uses the HSV values provided by user parameters;
154  - Pipeline 1 broadens that fixed range a bit;
155  - Pipelines 2-3 use a narrower and a broader learned HSV window, adapted over time.
156 
157  Detections from all 4 pipelines are considered for overlap and quality (raggedness of their outlines), and only the
158  cleanest of several overlapping detections is preserved. From those cleanest detections, pipelines 2-3 learn and
159  adapt the HSV range for future video frames.
160 
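 For illustration, here is a minimal sketch of one such pipeline using standard OpenCV calls (a hypothetical
 helper function, not this module's exact code; see detect() below for the real implementation):

 \code
 #include <opencv2/imgproc/imgproc.hpp>
 #include <vector>

 // Return smoothed contours of all blobs whose HSV values fall within [rmin .. rmax]:
 std::vector<std::vector<cv::Point> > hsvBlobs(cv::Mat const & imghsv, cv::Scalar rmin, cv::Scalar rmax)
 {
   cv::Mat imgth;
   cv::inRange(imghsv, rmin, rmax, imgth);  // keep only pixels within the HSV range
   cv::erode(imgth, imgth, cv::Mat());      // cleanup: erode then dilate to remove small noise specks
   cv::dilate(imgth, imgth, cv::Mat());

   std::vector<std::vector<cv::Point> > contours;
   cv::findContours(imgth, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

   // Smooth each contour to remove small defects before shape analysis:
   for (auto & c : contours)
   {
     std::vector<cv::Point> approx;
     cv::approxPolyDP(c, approx, 0.015 * cv::arcLength(c, true), true);
     c = std::move(approx);
   }
   return contours;
 }
 \endcode

 The size, fill-ratio, and shape tests are then applied to each surviving contour, as done in detect() below.
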
161  Using this module
162  -----------------
163 
164  Check out [this tutorial](http://jevois.org/tutorials/UserFirstVision.html).
165 
166  Detection and quality control steps
167  -----------------------------------
168 
169  The following messages appear for each of the 4 pipelines, at the bottom of the demo video, to help users figure out
170  why their object may not be detected:
171 
172  - T0 to T3: thread (pipeline) number
173  - H=..., S=..., V=...: HSV range considered by that thread
174  - N=...: number of raw blobs detected in that range
175  - Because several blobs may remain in consideration in each thread from this point on, information is shown only
176  about the one that progressed farthest through the series of tests. One letter is added each time a test is passed:
177  + H: the convex hull of the blob is quadrilateral (4 vertices)
178  + A: hull area is within range specified by parameter \p hullarea
179  + F: object to hull fill ratio is below the limit set by parameter \p hullfill (i.e., object is not a solid,
180  filled quadrilateral shape)
181  + S: the object has 8 vertices after shape smoothing to eliminate small shape defects (a U shape is
182  indeed expected to have 8 vertices).
183  + E: the shape discrepancy between the original shape and the smoothed shape is acceptable per parameter
184  \p ethresh, i.e., the original contour did not have a lot of defects.
185  + M: the shape is not too close to the borders of the image, per parameter \p margin, i.e., it is unlikely to
186  be truncated as the object partially exits the camera's field of view.
187  + V: vectors describing the shape as it relates to its convex hull are non-zero, i.e., the centroid of the shape
188  is not exactly coincident with the centroid of its convex hull, as we would expect for a U shape.
189  + U: the shape is roughly upright; upside-down U shapes are rejected as likely spurious.
190  + OK: this thread detected at least one shape that passed all the tests.
191 
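 For example, a status line like the following (hypothetical values) would mean that thread 2 considered hues in
 [30..60], saturations in [30..255], and values in [180..255], found 2 raw blobs, and one of them passed every test:

 \verbatim
 T2: H=030-060 S=030-255 V=180-255 N=002 HAFSEMVU OK
 \endverbatim
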
192  The black and white picture at right shows the pixels in HSV range for the thread determined by parameter
193  \p showthread (with value 0 by default).
194 
195  Serial Messages
196  ---------------
197 
198  This module can send standardized serial messages as described in \ref UserSerialStyle. One message is issued on
199  every video frame for each detected and good object. The \p id field in the messages is simply \b FIRST for all
200  messages.
201 
202  When \p dopose is turned on, 3D messages will be sent, otherwise 2D messages.
203 
204  2D messages when \p dopose is off:
205 
206  - Serial message type: \b 2D
207  - `id`: always `FIRST`
208  - `x`, `y`, or vertices: standardized 2D coordinates of object center or corners
209  - `w`, `h`: standardized object size
210  - `extra`: none (empty string)
211 
212  3D messages when \p dopose is on:
213 
214  - Serial message type: \b 3D
215  - `id`: always `FIRST`
216  - `x`, `y`, `z`, or vertices: 3D coordinates in millimeters of object center, or corners
217  - `w`, `h`, `d`: object size in millimeters; a depth of 1 mm is always used
218  - `extra`: none (empty string)
219 
220  NOTE: 3D pose estimation from low-resolution 176x144 images at 120fps can be quite noisy. Make sure you tune your
221  HSV ranges very well if you want to operate at 120fps (see below). To operate more reliably at very low resolutions,
222  one may want to improve this module by adding subpixel shape refinement and tracking across frames.
223 
224  See \ref UserSerialStyle for more on standardized serial messages, and \ref coordhelpers for more info on
225  standardized coordinates.
226 
227  Trying it out
228  -------------
229 
230  The default parameter settings (which are set in \b script.cfg explained below) attempt to detect yellow-green
231  objects. Present an object to the JeVois camera and see whether it is detected. When detected and good
232  enough according to a number of quality control tests, the outline of the object is drawn.
233 
234  For further use of this module, you may want to check out the following tutorials:
235 
236  - [Using the sample FIRST Robotics vision module](http://jevois.org/tutorials/UserFirstVision.html)
237  - [Tuning the color-based object tracker using a python graphical
238  interface](http://jevois.org/tutorials/UserColorTracking.html)
239  - [Making a motorized pan-tilt head for JeVois and tracking
240  objects](http://jevois.org/tutorials/UserPanTilt.html)
241  - \ref ArduinoTutorial
242 
243  Tuning
244  ------
245 
246  You need to provide the exact width and height of your physical shape to parameter \p objsize for this module to
247  work. It will look for a shape of that physical size (though at any distance and orientation from the camera). Be
248  sure you edit \b script.cfg and set the parameter \p objsize in there to the true measured physical size of your
249  shape.
250 
251  You should adjust parameters \p hcue, \p scue, and \p vcue to isolate the range of Hue, Saturation, and Value
252  (respectively) that correspond to the objects you want to detect. Note that there is a \b script.cfg file in this
253  module's directory that provides a range tuned to a light yellow-green object, as shown in the demo screenshot.
254 
255  Tuning the parameters is best done interactively by connecting to your JeVois camera while it is looking at some
256  object of the desired color. Once you have achieved a tuning, you may want to set the hcue, scue, and vcue
257  parameters in your \b script.cfg file for this module on the microSD card (see below).
258 
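 For example, the following commands, typed in the JeVois command-line interface while viewing the live video,
 would select a green hue range (the values shown are illustrative; adjust them for your own object):

 \verbatim
 setpar hcue 60
 setpar scue 50
 setpar vcue 150
 \endverbatim
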
259  Typically, you would start by narrowing down on the hue, then the value, and finally the saturation. Make sure you
260  also move your camera around and show it typical background clutter to check for false positives (detections of
261  things in which you are not interested, which can happen if your ranges are too wide).
262 
263  Config file
264  -----------
265 
266  JeVois allows you to store parameter settings and commands in a file named \b script.cfg stored in the directory of
267  a module. The file \b script.cfg may contain any sequence of commands as you would type them interactively in the
268  JeVois command-line interface. For the \jvmod{FirstVision} module, a default script is provided that sets the camera
269  to manual color, gain, and exposure mode (for more reliable color values), and other example parameter values.
270 
271  The \b script.cfg file for \jvmod{FirstVision} is stored on your microSD at
272  <b>JEVOIS:/modules/JeVois/FirstVision/script.cfg</b>
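 A hypothetical excerpt from such a file might look as follows (the exact camera controls available depend on your
 sensor; all values here are illustrative only):

 \verbatim
 # Use manual exposure and white balance for stable color values:
 setcam autoexp 1
 setcam autowb 0

 # Initial HSV cues for a light yellow-green object:
 setpar hcue 45
 setpar scue 50
 setpar vcue 200
 \endverbatim
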
273 
274  @author Laurent Itti
275 
276  @videomapping YUYV 176 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
277  @videomapping YUYV 352 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
278  @videomapping YUYV 320 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
279  @videomapping YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
280  @videomapping NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstVision
281  @videomapping NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstVision
282  @email itti\@usc.edu
283  @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
284  @copyright Copyright (C) 2017 by Laurent Itti, iLab and the University of Southern California
285  @mainurl http://jevois.org
286  @supporturl http://jevois.org/doc
287  @otherurl http://iLab.usc.edu
288  @license GPL v3
289  @distribution Unrestricted
290  @restrictions None
291  \ingroup modules */
292 class FirstVision : public jevois::StdModule,
293  public jevois::Parameter<hcue, scue, vcue, maxnumobj, hullarea, hullfill, erodesize,
294  dilatesize, epsilon, debug, threads, showthread, ethresh,
295  dopose, camparams, iou, objsize, margin>
296 {
297  protected:
298  cv::Mat itsCamMatrix; //!< Our camera matrix
299  cv::Mat itsDistCoeffs; //!< Our camera distortion coefficients
300  bool itsCueChanged = true; //!< True when users change ranges
301 
302  void onParamChange(hcue const & param, unsigned char const & newval) { itsCueChanged = true; }
303  void onParamChange(scue const & param, unsigned char const & newval) { itsCueChanged = true; }
304  void onParamChange(vcue const & param, unsigned char const & newval) { itsCueChanged = true; }
305 
306  // ####################################################################################################
307  //! Helper struct for an HSV range triplet, where each range is specified as a mean and sigma:
308  /*! Note that sigma is used differently for H, S, and V, under the assumption that we want to track a bright target:
309  For H, the range is [mean-sigma .. mean+sigma]. For S and V, the range is [mean-sigma .. 255]. See rmin() and
310  rmax() for details. */
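  // Worked example (illustrative numbers): with muh=45, sih=30, mus=50, sis=20, muv=200 and siv=20, rmin()
  // returns (15, 30, 180) and rmax() returns (75, 255, 255), i.e., H in [15..75], S in [30..255], V in [180..255].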
311  struct hsvcue
312  {
313  //! Constructor
314  hsvcue(unsigned char h, unsigned char s, unsigned char v) : muh(h), sih(30), mus(s), sis(20), muv(v), siv(20)
315  { fix(); }
316 
317  //! Constructor
318  hsvcue(unsigned char h, unsigned char hsig, unsigned char s, unsigned char ssig,
319  unsigned char v, unsigned char vsig) : muh(h), sih(hsig), mus(s), sis(ssig), muv(v), siv(vsig)
320  { fix(); }
321 
322  //! Fix ranges so they don't go out of bounds
323  void fix()
324  {
325  muh = std::min(179.0F, std::max(1.0F, muh)); sih = std::max(1.0F, std::min(sih, 360.0F));
326  mus = std::min(254.0F, std::max(1.0F, mus)); sis = std::max(1.0F, std::min(sis, 512.0F));
327  muv = std::min(254.0F, std::max(1.0F, muv)); siv = std::max(1.0F, std::min(siv, 512.0F));
328  }
329 
330  //! Get minimum triplet for use by cv::inRange()
331  cv::Scalar rmin() const
332  { return cv::Scalar(std::max(0.0F, muh - sih), std::max(0.0F, mus - sis), std::max(0.0F, muv - siv)); }
333 
334  //! Get maximum triplet for use by cv::inRange()
335  cv::Scalar rmax() const
336  { return cv::Scalar(std::min(179.0F, muh + sih), 255, 255); }
337 
338  float muh, sih; //!< Mean and sigma for H
339  float mus, sis; //!< Mean and sigma for S
340  float muv, siv; //!< Mean and sigma for V
341  };
342 
343  std::vector<hsvcue> itsHSV;
344 
345  // ####################################################################################################
346  //! Helper struct for a detected object
347  struct detection
348  {
349  std::vector<cv::Point> contour; //!< The full detailed contour
350  std::vector<cv::Point> approx; //!< Smoothed approximation of the contour
351  std::vector<cv::Point> hull; //!< Convex hull of the contour
352  size_t threadnum; //!< Thread number that detected this object
353  float serr; //!< Shape error score (higher for rougher contours with defects)
354  };
355 
356  //! Our detections, combined across all threads
357  std::vector<detection> itsDetections;
358  std::mutex itsDetMtx;
359 
360  //! Kalman filters to learn and adapt HSV windows over time
361  std::shared_ptr<Kalman1D> itsKalH, itsKalS, itsKalV;
362 
363  //! Erosion and dilation kernels shared across all detect threads
364  cv::Mat itsErodeElement, itsDilateElement;
365 
366  // ####################################################################################################
367  //! ParallelLoopBody class for parallelizing single-marker pose estimation
368  /*! Derived from the opencv_contrib ArUco module; it is just a simple solvePnP call inside. */
369  class SinglePoseEstimationParallel : public cv::ParallelLoopBody
370  {
371  public:
372  SinglePoseEstimationParallel(cv::Mat & _objPoints, cv::InputArrayOfArrays _corners,
373  cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
374  cv::Mat & _rvecs, cv::Mat & _tvecs) :
375  objPoints(_objPoints), corners(_corners), cameraMatrix(_cameraMatrix),
376  distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs)
377  { }
378 
379  void operator()(cv::Range const & range) const
380  {
381  int const begin = range.start;
382  int const end = range.end;
383 
384  for (int i = begin; i < end; ++i)
385  cv::solvePnP(objPoints, corners.getMat(i), cameraMatrix, distCoeffs,
386  rvecs.at<cv::Vec3d>(i), tvecs.at<cv::Vec3d>(i));
387  }
388 
389  private:
390  cv::Mat & objPoints;
391  cv::InputArrayOfArrays corners;
392  cv::InputArray cameraMatrix, distCoeffs;
393  cv::Mat & rvecs, & tvecs;
394  };
395 
396  // ####################################################################################################
397  // ####################################################################################################
398  // ####################################################################################################
399 
400  public:
401  // ####################################################################################################
402  //! Constructor
403  FirstVision(std::string const & instance) : jevois::StdModule(instance)
404  {
405  itsKalH = addSubComponent<Kalman1D>("kalH");
406  itsKalS = addSubComponent<Kalman1D>("kalS");
407  itsKalV = addSubComponent<Kalman1D>("kalV");
408  }
409 
410  // ####################################################################################################
411  //! Virtual destructor for safe inheritance
412  virtual ~FirstVision() { }
413 
414  // ####################################################################################################
415  //! Estimate 6D pose of detected objects, if dopose parameter is true, otherwise just 2D corners
416  /*! Inspired by the ArUco module of opencv_contrib.
417  The corners array is always filled; rvecs and tvecs are filled only if dopose is true. */
418  void estimatePose(std::vector<std::vector<cv::Point2f> > & corners, cv::OutputArray _rvecs,
419  cv::OutputArray _tvecs)
420  {
421  auto const osiz = objsize::get();
422 
423  // Get a vector of all our corners so we can map them to 3D and draw them:
424  corners.clear();
425  for (detection const & d : itsDetections)
426  {
427  corners.push_back(std::vector<cv::Point2f>());
428  std::vector<cv::Point2f> & v = corners.back();
429  for (auto const & p : d.hull) v.push_back(cv::Point2f(p));
430  }
431 
432  if (dopose::get())
433  {
434  // set coordinate system in the middle of the object, with Z pointing out
435  cv::Mat objPoints(4, 1, CV_32FC3);
436  objPoints.ptr< cv::Vec3f >(0)[0] = cv::Vec3f(-osiz.width * 0.5F, -osiz.height * 0.5F, 0);
437  objPoints.ptr< cv::Vec3f >(0)[1] = cv::Vec3f(-osiz.width * 0.5F, osiz.height * 0.5F, 0);
438  objPoints.ptr< cv::Vec3f >(0)[2] = cv::Vec3f(osiz.width * 0.5F, osiz.height * 0.5F, 0);
439  objPoints.ptr< cv::Vec3f >(0)[3] = cv::Vec3f(osiz.width * 0.5F, -osiz.height * 0.5F, 0);
440 
441  int nobj = (int)corners.size();
442  _rvecs.create(nobj, 1, CV_64FC3); _tvecs.create(nobj, 1, CV_64FC3);
443  cv::Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
444  cv::parallel_for_(cv::Range(0, nobj), SinglePoseEstimationParallel(objPoints, corners, itsCamMatrix,
445  itsDistCoeffs, rvecs, tvecs));
446  }
447  }
448 
449  // ####################################################################################################
450  //! Load camera calibration parameters
451  void loadCameraCalibration(unsigned int w, unsigned int h)
452  {
452  {
453  camparams::freeze();
454 
455  std::string const cpf = std::string(JEVOIS_SHARE_PATH) + "/camera/" + camparams::get() +
456  std::to_string(w) + 'x' + std::to_string(h) + ".yaml";
457 
458  cv::FileStorage fs(cpf, cv::FileStorage::READ);
459  if (fs.isOpened())
460  {
461  fs["camera_matrix"] >> itsCamMatrix;
462  fs["distortion_coefficients"] >> itsDistCoeffs;
463  LINFO("Loaded camera calibration from " << cpf);
464  }
465  else LFATAL("Failed to read camera parameters from file [" << cpf << "]");
466  }
467 
468  // ####################################################################################################
469  //! HSV object detector; we run several of these in parallel with different hsvcue settings
470  void detect(cv::Mat const & imghsv, size_t tnum, int dispx = 3, int dispy = 242, jevois::RawImage *outimg = nullptr)
471  {
472  // Threshold the HSV image to only keep pixels within the desired HSV range:
473  cv::Mat imgth;
474  hsvcue const & hsv = itsHSV[tnum]; cv::Scalar const rmin = hsv.rmin(), rmax = hsv.rmax();
475  cv::inRange(imghsv, rmin, rmax, imgth);
476  std::string str = jevois::sformat("T%zu: H=%03d-%03d S=%03d-%03d V=%03d-%03d ", tnum, int(rmin.val[0]),
477  int(rmax.val[0]), int(rmin.val[1]), int(rmax.val[1]),
478  int(rmin.val[2]), int(rmax.val[2]));
479 
480  // Apply morphological operations to cleanup the image noise:
481  if (itsErodeElement.empty() == false) cv::erode(imgth, imgth, itsErodeElement);
482  if (itsDilateElement.empty() == false) cv::dilate(imgth, imgth, itsDilateElement);
483 
484  // Detect objects by finding contours:
485  std::vector<std::vector<cv::Point> > contours; std::vector<cv::Vec4i> hierarchy;
486  cv::findContours(imgth, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
487  str += jevois::sformat("N=%03zu ", hierarchy.size());
488 
489  double const epsi = epsilon::get();
490  int const m = margin::get();
491 
492  // Identify the "good" objects:
493  std::string str2, beststr2;
494  if (hierarchy.size() > 0 && hierarchy.size() <= maxnumobj::get())
495  {
496  for (int index = 0; index >= 0; index = hierarchy[index][0])
497  {
498  // Keep track of our best detection so far:
499  if (str2.length() > beststr2.length()) beststr2 = str2;
500  str2.clear();
501 
502  // Let's examine this contour:
503  std::vector<cv::Point> const & c = contours[index];
504  detection d;
505 
506  // Compute contour area:
507  double const area = cv::contourArea(c, false);
508 
509  // Compute convex hull:
510  std::vector<cv::Point> rawhull;
511  cv::convexHull(c, rawhull, true);
512  double const rawhullperi = cv::arcLength(rawhull, true);
513  cv::approxPolyDP(rawhull, d.hull, epsi * rawhullperi * 3.0, true);
514 
515  // Is it the right shape?
516  if (d.hull.size() != 4) continue; // 4 vertices for the rectangular convex outline (shows as a trapezoid)
517  str2 += "H"; // Hull is quadrilateral
518 
519  double const huarea = cv::contourArea(d.hull, false);
520  if ( ! hullarea::get().contains(int(huarea + 0.4999))) continue;
521  str2 += "A"; // Hull area ok
522 
523  int const hufill = int(area / huarea * 100.0 + 0.4999);
524  if (hufill > hullfill::get()) continue;
525  str2 += "F"; // Fill is ok
526 
527  // Check object shape:
528  double const peri = cv::arcLength(c, true);
529  cv::approxPolyDP(c, d.approx, epsi * peri, true);
530  if (d.approx.size() < 7 || d.approx.size() > 9) continue; // 8 vertices for a U shape
531  str2 += "S"; // Shape is ok
532 
533  // Compute contour serr:
534  d.serr = 100.0 * cv::matchShapes(c, d.approx, cv::CONTOURS_MATCH_I1, 0.0);
535  if (d.serr > ethresh::get()) continue;
536  str2 += "E"; // Shape error is ok
537 
538  // Reject the shape if any of its vertices gets within the margin of the image bounds. This is to avoid
539  // getting grossly incorrect 6D pose estimates as the shape starts getting truncated as it partially exits the
540  // camera field of view:
541  bool reject = false;
542  for (size_t i = 0; i < c.size(); ++i)
543  if (c[i].x < m || c[i].x >= imghsv.cols - m || c[i].y < m || c[i].y >= imghsv.rows - m)
544  { reject = true; break; }
545  if (reject) continue;
546  str2 += "M"; // Margin ok
547 
548  // Re-order the 4 points in the hull if needed: In the pose estimation code, we will assume vertices ordered
549  // as follows:
550  //
551  //   0|          |3
552  //    |          |
553  //    |          |
554  //    1----------2
555 
556  // v10+v23 should point outward of the U more than v03+v12 does:
557  std::complex<float> v10p23(float(d.hull[0].x - d.hull[1].x + d.hull[3].x - d.hull[2].x),
558  float(d.hull[0].y - d.hull[1].y + d.hull[3].y - d.hull[2].y));
559  float const len10p23 = std::abs(v10p23);
560  std::complex<float> v03p12(float(d.hull[3].x - d.hull[0].x + d.hull[2].x - d.hull[1].x),
561  float(d.hull[3].y - d.hull[0].y + d.hull[2].y - d.hull[1].y));
562  float const len03p12 = std::abs(v03p12);
563 
564  // Vector from centroid of U shape to centroid of its hull should also point outward of the U:
565  cv::Moments const momC = cv::moments(c);
566  cv::Moments const momH = cv::moments(d.hull);
567  std::complex<float> vCH(momH.m10 / momH.m00 - momC.m10 / momC.m00, momH.m01 / momH.m00 - momC.m01 / momC.m00);
568  float const lenCH = std::abs(vCH);
569 
570  if (len10p23 < 0.1F || len03p12 < 0.1F || lenCH < 0.1F) continue;
571  str2 += "V"; // Shape vectors ok
572 
573  float const good = (v10p23.real() * vCH.real() + v10p23.imag() * vCH.imag()) / (len10p23 * lenCH);
574  float const bad = (v03p12.real() * vCH.real() + v03p12.imag() * vCH.imag()) / (len03p12 * lenCH);
575 
576  // We reject upside-down detections as those are likely to be spurious:
577  if (vCH.imag() >= -2.0F) continue;
578  str2 += "U"; // U shape is upright
579 
580  // Fixup the ordering of the vertices if needed:
581  if (bad > good) { d.hull.insert(d.hull.begin(), d.hull.back()); d.hull.pop_back(); }
582 
583  // This detection is a keeper:
584  str2 += " OK";
585  d.contour = c;
586  std::lock_guard<std::mutex> _(itsDetMtx);
587  itsDetections.push_back(d);
588  }
589  if (str2.length() > beststr2.length()) beststr2 = str2;
590  }
591 
592  // Display any results requested by the users:
593  if (outimg && outimg->valid())
594  {
595  if (tnum == showthread::get() && outimg->width == 2 * imgth.cols)
596  jevois::rawimage::pasteGreyToYUYV(imgth, *outimg, imgth.cols, 0);
597  jevois::rawimage::writeText(*outimg, str + beststr2, dispx, dispy + 12*tnum, jevois::yuyv::White);
598  }
599  }
600 
601  // ####################################################################################################
602  //! Initialize (e.g., if user changes cue params) or update our HSV detection ranges
603  void updateHSV(size_t nthreads)
604  {
605  float const spread = 0.2F;
606 
607  if (itsHSV.empty() || itsCueChanged)
608  {
609  // Initialize or reset because of user parameter change:
610  itsHSV.clear(); itsCueChanged = false;
611  for (size_t i = 0; i < nthreads; ++i)
612  {
613  hsvcue cue(hcue::get(), scue::get(), vcue::get());
614  cue.sih *= (1.0F + spread * i); cue.sis *= (1.0F + spread * i); cue.siv *= (1.0F + spread * i);
615  cue.fix();
616  itsHSV.push_back(cue);
617  }
618  if (nthreads > 2)
619  {
620  itsKalH->set(hcue::get()); itsKalH->get();
621  itsKalS->set(scue::get()); itsKalS->get();
622  itsKalV->set(vcue::get()); itsKalV->get();
623  }
624  }
625  else
626  {
627  // Kalman update:
628  if (nthreads > 2)
629  {
630  itsHSV[2].muh = itsKalH->get();
631  itsHSV[2].mus = itsKalS->get();
632  itsHSV[2].muv = itsKalV->get();
633  itsHSV[2].fix();
634  for (size_t i = 3; i < itsHSV.size(); ++i)
635  {
636  itsHSV[i] = itsHSV[2];
637  itsHSV[i].sih *= (1.0F + spread * i);
638  itsHSV[i].sis *= (1.0F + spread * i);
639  itsHSV[i].siv *= (1.0F + spread * i);
640  itsHSV[i].fix();
641  }
642  }
643  }
644  }
645 
646  // ####################################################################################################
647  //! Clean up the detections by eliminating duplicates:
648  void cleanupDetections()
649  {
650  bool keepgoing = true;
651  double const iouth = iou::get();
652 
653  while (keepgoing)
654  {
655  // We will stop if we do not eliminate any more objects:
656  keepgoing = false; int delidx = -1;
657 
658  // Loop over all pairs of objects:
659  size_t const siz = itsDetections.size();
660  for (size_t i = 0; i < siz; ++i)
661  {
662  for (size_t j = 0; j < i; ++j)
663  {
664  std::vector<cv::Point> pts = itsDetections[i].hull;
665  for (cv::Point const & p : itsDetections[j].hull) pts.push_back(p);
666  std::vector<cv::Point> hull;
667  cv::convexHull(pts, hull); // FIXME should do a true union! this is just an approximation to it
668  double uarea = cv::contourArea(hull);
669  double iarea = cv::contourArea(itsDetections[i].hull) + cv::contourArea(itsDetections[j].hull) - uarea;
670 
671  // note: object detection code guarantees non-zero area:
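  // Illustration of this approximation: two identical hulls yield uarea == iarea, hence inoun == 1, and one of
  // them gets eliminated; two disjoint hulls yield iarea <= 0, hence inoun <= 0, and both are kept.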
672  double const inoun = iarea / uarea;
673  if (inoun >= iouth)
674  {
675  if (itsDetections[i].serr > itsDetections[j].serr) delidx = j; else delidx = i;
676  break;
677  }
678  }
679  if (delidx != -1) break;
680  }
681  if (delidx != -1) { itsDetections.erase(itsDetections.begin() + delidx); keepgoing = true; }
682  }
683  }
684 
685  // ####################################################################################################
686  //! Learn and update our HSV ranges
687  void learnHSV(size_t nthreads, cv::Mat const & imgbgr, jevois::RawImage *outimg = nullptr)
688  {
689  int const w = imgbgr.cols, h = imgbgr.rows;
690 
691  // Compute the median filtered BGR image in a thread:
692  cv::Mat medimgbgr;
693  auto median_fut = std::async(std::launch::async, [&](){ cv::medianBlur(imgbgr, medimgbgr, 3); } );
694 
695  // Get all the cleaned-up contours:
696  std::vector<std::vector<cv::Point> > contours;
697  for (detection const & d : itsDetections) contours.push_back(d.contour);
698 
699  // If desired, draw all contours:
700  std::future<void> drawc_fut;
701  if (debug::get() && outimg && outimg->valid())
702  drawc_fut = std::async(std::launch::async, [&]() {
703  // We reinterpret the top portion of our YUYV output image as an opencv 8UC2 image:
704  cv::Mat outuc2(outimg->height, outimg->width, CV_8UC2, outimg->pixelsw<unsigned char>());
705  cv::drawContours(outuc2, contours, -1, jevois::yuyv::LightPink, 2);
706  } );
707 
708  // Draw all the filled contours into a binary mask image:
709  cv::Mat mask(h, w, CV_8UC1, (unsigned char)0);
710  cv::drawContours(mask, contours, -1, 255, -1); // last -1 is for filled
711 
712  // Wait until median filter is done:
713  median_fut.get();
714 
715  // Compute mean and std BGR values inside objects:
716  cv::Mat mean, std;
717  cv::meanStdDev(medimgbgr, mean, std, mask);
718 
719  // Convert to HSV:
720  cv::Mat bgrmean(2, 1, CV_8UC3); bgrmean.at<cv::Vec3b>(0, 0) = mean; bgrmean.at<cv::Vec3b>(1, 0) = std;
721  cv::Mat hsvmean; cv::cvtColor(bgrmean, hsvmean, cv::COLOR_BGR2HSV);
722 
723  cv::Vec3b hsv = hsvmean.at<cv::Vec3b>(0, 0);
724  int H = hsv.val[0], S = hsv.val[1], V = hsv.val[2];
725 
726  cv::Vec3b sighsv = hsvmean.at<cv::Vec3b>(1, 0);
727  int sH = sighsv.val[0], sS = sighsv.val[1], sV = sighsv.val[2];
728 
729  // Set the new measurements:
730  itsKalH->set(H); itsKalS->set(S); itsKalV->set(V);
731 
732  if (nthreads > 2)
733  {
734  float const eta = 0.4F;
735  itsHSV[2].sih = (1.0F - eta) * itsHSV[2].sih + eta * sH;
736  itsHSV[2].sis = (1.0F - eta) * itsHSV[2].sis + eta * sS;
737  itsHSV[2].siv = (1.0F - eta) * itsHSV[2].siv + eta * sV;
738  itsHSV[2].fix();
739  }
740 
741  // note: drawc_fut may block us here until it is complete.
742  }
743 
744  // ####################################################################################################
745  //! Send serial messages about each detection:
746  void sendAllSerial(int w, int h, std::vector<std::vector<cv::Point2f> > const & corners,
747  std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
748  {
749  if (rvecs.empty() == false)
750  {
751  // If we have rvecs and tvecs, we are doing 3D pose estimation, so send a 3D message:
752  auto const osiz = objsize::get();
753  for (size_t i = 0; i < corners.size(); ++i)
754  {
755  std::vector<cv::Point2f> const & curr = corners[i];
756  cv::Vec3d const & rv = rvecs[i];
757  cv::Vec3d const & tv = tvecs[i];
758 
759  // Compute quaternion:
760  float const theta = std::sqrt(rv[0] * rv[0] + rv[1] * rv[1] + rv[2] * rv[2]);
761  Eigen::Vector3f axis(rv[0], rv[1], rv[2]); if (theta > 0.0F) axis /= theta; // AngleAxis needs a unit axis
762  Eigen::Quaternion<float> q(Eigen::AngleAxis<float>(theta, axis));
763 
764  sendSerialStd3D(tv[0], tv[1], tv[2], // position
765  osiz.width, osiz.height, 1.0F, // size
766  q.w(), q.x(), q.y(), q.z(), // pose
767  "FIRST"); // FIRST robotics shape
768  }
769  }
770  else
771  {
772  // Send one 2D message per object:
773  for (size_t i = 0; i < corners.size(); ++i)
774  sendSerialContour2D(w, h, corners[i], "FIRST");
775  }
776  }
777 
778  // ####################################################################################################
779  //! Update the morphology structuring elements if needed
780  void updateStructuringElements()
781  {
782  int e = erodesize::get();
783  if (e != itsErodeElement.cols)
784  {
785  if (e) itsErodeElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(e, e));
786  else itsErodeElement.release();
787  }
788 
789  int d = dilatesize::get();
790  if (d != itsDilateElement.cols)
791  {
792  if (d) itsDilateElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(d, d));
793  else itsDilateElement.release();
794  }
795  }
796 
797  // ####################################################################################################
798  //! Processing function, no USB video output
799  virtual void process(jevois::InputFrame && inframe) override
800  {
801  static jevois::Timer timer("processing");
802 
803  // Wait for next available camera image. Any resolution ok:
804  jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
805 
806  timer.start();
807 
808  // Load camera calibration if needed:
809  if (itsCamMatrix.empty()) loadCameraCalibration(w, h);
810 
811  // Convert input image to BGR24, then to HSV:
812  cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
813  cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
814  size_t const nthreads = threads::get();
815 
816  // Make sure our HSV range parameters are up to date:
817  updateHSV(nthreads);
818 
819  // Clear any old detections and get ready to parallelize the detection work:
820  itsDetections.clear();
821  updateStructuringElements();
822 
823  // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
824  std::vector<std::future<void> > dfut;
825  for (size_t i = 0; i < nthreads - 1; ++i)
826  dfut.push_back(std::async(std::launch::async, [&](size_t tn) { detect(imghsv, tn, 3, h+2); }, i));
827  detect(imghsv, nthreads - 1, 3, h+2);
828 
829  // Wait for all threads to complete:
830  for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
831 
832  // Let camera know we are done processing the input image:
833  inframe.done();
834 
835  // Clean up the detections by eliminating duplicates:
836  cleanupDetections();
837 
838  // Learn the object's HSV value over time:
839  auto learn_fut = std::async(std::launch::async, [&]() { learnHSV(nthreads, imgbgr); });
840 
841  // Map to 6D (inverse perspective):
842  std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
843  estimatePose(corners, rvecs, tvecs);
844 
845  // Send all serial messages:
846  sendAllSerial(w, h, corners, rvecs, tvecs);
847 
848  // Wait for all threads:
849  try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
850 
851  // Show processing fps:
852  timer.stop();
853  }
854 
855  // ####################################################################################################
856  //! Processing function, with USB video output
857  virtual void process(jevois::InputFrame && inframe, jevois::OutputFrame && outframe) override
858  {
859  static jevois::Timer timer("processing");
860 
861  // Wait for next available camera image. Any resolution ok, but require YUYV since we assume it for drawings:
862  jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
863  inimg.require("input", w, h, V4L2_PIX_FMT_YUYV);
864 
865  timer.start();
866 
867  // Load camera calibration if needed:
868  if (itsCamMatrix.empty()) loadCameraCalibration(w, h);
869 
870  // While we process it, start a thread to wait for output frame and paste the input image into it:
871  jevois::RawImage outimg; // main thread should not use outimg until paste thread is complete
872  auto paste_fut = std::async(std::launch::async, [&]() {
873  outimg = outframe.get();
874  outimg.require("output", outimg.width, h + 50, inimg.fmt);
875  if (outimg.width != w && outimg.width != w * 2) LFATAL("Output image width should be 1x or 2x input width");
876  jevois::rawimage::paste(inimg, outimg, 0, 0);
877  jevois::rawimage::writeText(outimg, "JeVois FIRST Vision", 3, 3, jevois::yuyv::White);
878  jevois::rawimage::drawFilledRect(outimg, 0, h, outimg.width, outimg.height-h, jevois::yuyv::Black);
879  });
880 
881  // Convert input image to BGR24, then to HSV:
882  cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
883  cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
884  size_t const nthreads = threads::get();
885 
886  // Make sure our HSV range parameters are up to date:
887  updateHSV(nthreads);
888 
889  // Clear any old detections and get ready to parallelize the detection work:
890  itsDetections.clear();
891  updateStructuringElements();
892 
893  // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
894  std::vector<std::future<void> > dfut;
895  for (size_t i = 0; i < nthreads - 1; ++i)
896  dfut.push_back(std::async(std::launch::async, [&](size_t tn) { detect(imghsv, tn, 3, h+2, &outimg); }, i));
897  detect(imghsv, nthreads - 1, 3, h+2, &outimg);
898 
899  // Wait for all threads to complete:
900  for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
901 
902  // Wait for paste to finish up:
903  paste_fut.get();
904 
905  // Let camera know we are done processing the input image:
906  inframe.done();
907 
908  // Clean up the detections by eliminating duplicates:
909  cleanupDetections();
910 
911  // Learn the object's HSV value over time:
912  auto learn_fut = std::async(std::launch::async, [&]() { learnHSV(nthreads, imgbgr, &outimg); });
913 
914  // Map to 6D (inverse perspective):
915  std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
916  estimatePose(corners, rvecs, tvecs);
917 
918  // Send all serial messages:
919  sendAllSerial(w, h, corners, rvecs, tvecs);
920 
921  // Draw all detections in 3D:
922  drawDetections(outimg, corners, rvecs, tvecs);
923 
924  // Show number of detected objects:
925  jevois::rawimage::writeText(outimg, "Detected " + std::to_string(itsDetections.size()) + " objects.",
926  w + 3, 3, jevois::yuyv::White);
927 
928  // Wait for all threads:
929  try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
930 
931  // Show processing fps:
932  std::string const & fpscpu = timer.stop();
933  jevois::rawimage::writeText(outimg, fpscpu, 3, h - 13, jevois::yuyv::White);
934 
935  // Send the output image with our processing results to the host over USB:
936  outframe.send();
937  }
938 
939  // ####################################################################################################
940  void drawDetections(jevois::RawImage & outimg, std::vector<std::vector<cv::Point2f> > corners,
941  std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
942  {
943  auto const osiz = objsize::get(); float const w = osiz.width, h = osiz.height;
944  int nobj = int(corners.size());
945 
946  // This code is like drawDetectedMarkers() in cv::aruco, but for YUYV output image:
947  if (rvecs.empty())
948  {
949  // We are not doing 3D pose estimation. Just draw object outlines in 2D:
950  for (int i = 0; i < nobj; ++i)
951  {
952  std::vector<cv::Point2f> const & obj = corners[i];
953 
954  // draw marker sides:
955  for (int j = 0; j < 4; ++j)
956  {
957  cv::Point2f const & p0 = obj[j];
958  cv::Point2f const & p1 = obj[ (j+1) % 4 ];
959  jevois::rawimage::drawLine(outimg, int(p0.x + 0.5F), int(p0.y + 0.5F),
960  int(p1.x + 0.5F), int(p1.y + 0.5F), 1, jevois::yuyv::LightPink);
961  //jevois::rawimage::writeText(outimg, std::to_string(j),
962  // int(p0.x + 0.5F), int(p0.y + 0.5F), jevois::yuyv::White);
963  }
964  }
965  }
966  else
967  {
968  // Show trihedron and parallelepiped centered on object:
969  float const hw = w * 0.5F, hh = h * 0.5F, dd = -0.5F * std::max(w, h);
970 
971  for (int i = 0; i < nobj; ++i)
972  {
973  // Project axis points:
974  std::vector<cv::Point3f> axisPoints;
975  axisPoints.push_back(cv::Point3f(0.0F, 0.0F, 0.0F));
976  axisPoints.push_back(cv::Point3f(hw, 0.0F, 0.0F));
977  axisPoints.push_back(cv::Point3f(0.0F, hh, 0.0F));
978  axisPoints.push_back(cv::Point3f(0.0F, 0.0F, dd));
979 
980  std::vector<cv::Point2f> imagePoints;
981  cv::projectPoints(axisPoints, rvecs[i], tvecs[i], itsCamMatrix, itsDistCoeffs, imagePoints);
982 
983  // Draw axis lines:
984  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
985  int(imagePoints[1].x + 0.5F), int(imagePoints[1].y + 0.5F),
986  2, jevois::yuyv::MedPurple);
987  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
988  int(imagePoints[2].x + 0.5F), int(imagePoints[2].y + 0.5F),
989  2, jevois::yuyv::MedGreen);
990  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
991  int(imagePoints[3].x + 0.5F), int(imagePoints[3].y + 0.5F),
992  2, jevois::yuyv::MedGrey);
993 
994  // Also draw a parallelepiped:
995  std::vector<cv::Point3f> cubePoints;
996  cubePoints.push_back(cv::Point3f(-hw, -hh, 0.0F));
997  cubePoints.push_back(cv::Point3f(hw, -hh, 0.0F));
998  cubePoints.push_back(cv::Point3f(hw, hh, 0.0F));
999  cubePoints.push_back(cv::Point3f(-hw, hh, 0.0F));
1000  cubePoints.push_back(cv::Point3f(-hw, -hh, dd));
1001  cubePoints.push_back(cv::Point3f(hw, -hh, dd));
1002  cubePoints.push_back(cv::Point3f(hw, hh, dd));
1003  cubePoints.push_back(cv::Point3f(-hw, hh, dd));
1004 
1005  std::vector<cv::Point2f> cuf;
1006  cv::projectPoints(cubePoints, rvecs[i], tvecs[i], itsCamMatrix, itsDistCoeffs, cuf);
1007 
1008  // Round all the coordinates:
1009  std::vector<cv::Point> cu;
1010  for (auto const & p : cuf) cu.push_back(cv::Point(int(p.x + 0.5F), int(p.y + 0.5F)));
1011 
1012  // Draw parallelepiped lines:
1013  jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[1].x, cu[1].y, 1, jevois::yuyv::LightGreen);
1014  jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[2].x, cu[2].y, 1, jevois::yuyv::LightGreen);
1015  jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[3].x, cu[3].y, 1, jevois::yuyv::LightGreen);
1016  jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[0].x, cu[0].y, 1, jevois::yuyv::LightGreen);
1017  jevois::rawimage::drawLine(outimg, cu[4].x, cu[4].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
1018  jevois::rawimage::drawLine(outimg, cu[5].x, cu[5].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
1019  jevois::rawimage::drawLine(outimg, cu[6].x, cu[6].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
1020  jevois::rawimage::drawLine(outimg, cu[7].x, cu[7].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
1021  jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
1022  jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
1023  jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
1024  jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
1025  }
1026  }
1027  }
1028 };
1029 
1030 // Allow the module to be loaded as a shared object (.so) file:
1031 JEVOIS_REGISTER_MODULE(FirstVision);