JeVoisBase  1.20
JeVois Smart Embedded Machine Vision Toolkit Base Modules
FirstVision.C
1 // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2 //
3 // JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
4 // California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
5 //
6 // This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
7 // redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
8 // Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9 // without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10 // License for more details. You should have received a copy of the GNU General Public License along with this program;
11 // if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12 //
13 // Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
14 // Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
15 // ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
16 /*! \file */
17 
18 #include <jevois/Core/Module.H>
19 #include <jevois/Debug/Log.H>
20 #include <jevois/Util/Utils.H>
21 #include <jevois/Image/RawImageOps.H>
22 #include <jevois/Debug/Timer.H>
23 #include <jevois/Util/Coordinates.H>
24 
25 #include <jevoisbase/Components/Tracking/Kalman1D.H>
26 
27 #include <opencv2/core/core.hpp>
28 #include <opencv2/imgproc/imgproc.hpp>
29 #include <opencv2/calib3d/calib3d.hpp>
30 
31 #include <Eigen/Geometry> // for AngleAxis and Quaternion
32 
33 // REMINDER: make sure you understand the viral nature and terms of the above license. If you are writing code derived
34 // from this file, you must offer your source under the GPL license too.
35 
36 static jevois::ParameterCategory const ParamCateg("FirstVision Options");
37 
38 //! Parameter \relates FirstVision
39 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(hcue, unsigned char, "Initial cue for target hue (0=red/do not use because of "
40  "wraparound, 30=yellow, 45=light green, 60=green, 75=green cyan, 90=cyan, "
41  "105=light blue, 120=blue, 135=purple, 150=pink)",
42  45, jevois::Range<unsigned char>(0, 179), ParamCateg);
43 
44 //! Parameter \relates FirstVision
45 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(scue, unsigned char, "Initial cue for target saturation lower bound",
46  50, ParamCateg);
47 
48 //! Parameter \relates FirstVision
49 JEVOIS_DECLARE_PARAMETER_WITH_CALLBACK(vcue, unsigned char, "Initial cue for target value (brightness) lower bound",
50  200, ParamCateg);
51 
52 //! Parameter \relates FirstVision
53 JEVOIS_DECLARE_PARAMETER(maxnumobj, size_t, "Max number of blobs allowed for an image to be declared clean. If more "
54  "blobs are detected in a frame, we skip that frame before we even try to analyze the blobs' shapes",
55  100, ParamCateg);
56 
57 //! Parameter \relates FirstVision
58 JEVOIS_DECLARE_PARAMETER(hullarea, jevois::Range<unsigned int>, "Range of object area (in pixels) to track. Use this "
59  "if you want to skip shape analysis of very large or very small blobs",
60  jevois::Range<unsigned int>(20*20, 300*300), ParamCateg);
61 
62 //! Parameter \relates FirstVision
63 JEVOIS_DECLARE_PARAMETER(hullfill, int, "Max fill ratio of the convex hull (percent). Lower values mean your shape "
64  "occupies a smaller fraction of its convex hull. This parameter sets an upper bound; "
65  "fuller shapes will be rejected.",
66  50, jevois::Range<int>(1, 100), ParamCateg);
67 
68 //! Parameter \relates FirstVision
69 JEVOIS_DECLARE_PARAMETER(erodesize, size_t, "Erosion structuring element size (pixels), or 0 for no erosion",
70  2, ParamCateg);
71 
72 //! Parameter \relates FirstVision
73 JEVOIS_DECLARE_PARAMETER(dilatesize, size_t, "Dilation structuring element size (pixels), or 0 for no dilation",
74  4, ParamCateg);
75 
76 //! Parameter \relates FirstVision
77 JEVOIS_DECLARE_PARAMETER(epsilon, double, "Shape smoothing factor (higher for smoother). Shape smoothing is applied "
78  "to remove small contour defects before the shape is analyzed.",
79  0.015, jevois::Range<double>(0.001, 0.999), ParamCateg);
80 
81 //! Parameter \relates FirstVision
82 JEVOIS_DECLARE_PARAMETER(debug, bool, "Show contours of all object candidates if true",
83  false, ParamCateg);
84 
85 //! Parameter \relates FirstVision
86 JEVOIS_DECLARE_PARAMETER(threads, size_t, "Number of parallel vision processing threads. Thread 0 uses the HSV values "
87  "provided by user parameters; thread 1 broadens that fixed range a bit; threads 2-3 use a "
88  "narrow and a broader HSV window, respectively, learned and adapted over time",
89  4, jevois::Range<size_t>(2, 4), ParamCateg);
90 
91 //! Parameter \relates FirstVision
92 JEVOIS_DECLARE_PARAMETER(showthread, size_t, "Thread number that is used to display HSV-thresholded image",
93  0, jevois::Range<size_t>(0, 3), ParamCateg);
94 
95 //! Parameter \relates FirstVision
96 JEVOIS_DECLARE_PARAMETER(ethresh, double, "Shape error threshold (lower is stricter for exact shape)",
97  900.0, jevois::Range<double>(0.01, 1000.0), ParamCateg);
98 
99 //! Parameter \relates FirstVision
100 JEVOIS_DECLARE_PARAMETER(dopose, bool, "Compute (and show) 6D object pose, requires a valid camera calibration. "
101  "When dopose is true, 3D serial messages are sent out, otherwise 2D serial messages.",
102  true, ParamCateg);
103 
104 //! Parameter \relates FirstVision
105 JEVOIS_DECLARE_PARAMETER(camparams, std::string, "File stem of camera parameters, or empty. Camera resolution "
106  "will be appended, as well as a .yaml extension. For example, specifying 'calibration' "
107  "here and running the camera sensor at 320x240 will attempt to load "
108  "calibration320x240.yaml from within directory " JEVOIS_SHARE_PATH "/camera/",
109  "calibration", ParamCateg);
110 
111 //! Parameter \relates FirstVision
112 JEVOIS_DECLARE_PARAMETER(iou, double, "Intersection-over-union ratio over which duplicates are eliminated",
113  0.3, jevois::Range<double>(0.01, 0.99), ParamCateg);
114 
115 //! Parameter \relates FirstVision
116 JEVOIS_DECLARE_PARAMETER(objsize, cv::Size_<float>, "Object size (in meters)",
117  cv::Size_<float>(0.28F, 0.175F), ParamCateg);
118 
119 //! Parameter \relates FirstVision
120 JEVOIS_DECLARE_PARAMETER(margin, size_t, "Margin from frame borders (pixels). If any corner of a detected shape "
121  "gets closer than the margin to the frame borders, the shape will be rejected. This is to "
122  "avoid possibly bogus 6D pose estimation when the shape starts getting truncated as it "
123  "partially exits the camera's field of view.",
124  5, ParamCateg);
125 
126 //! Simple color-based detection of a U-shaped object for FIRST Robotics
127 /*! This module isolates pixels within a given HSV range (hue, saturation, and value of color pixels), does some
128  cleanups, and extracts object contours. It is looking for a rectangular U shape of a specific size (set by parameter
129  \p objsize). See screenshots for an example of shape. It sends information about detected objects over serial.
130 
131  This module usually works best with the camera sensor set to manual exposure, manual gain, manual color balance,
132  etc., so that HSV color values are reliable. See the \b script.cfg file in this module's directory for an example
133  of how to set the camera settings each time this module is loaded.
134 
135  This code was loosely inspired by the JeVois \jvmod{ObjectTracker} module. Also see \jvmod{FirstPython} for a
136  simplified version of this module, written in Python.
137 
138  This module is provided for inspiration. It has no pretension of actually solving the FIRST Robotics vision problem
139  in a complete and reliable way. It is released in the hope that FRC teams will try it out and get inspired to
140  develop something much better for their own robot.
141 
142  General pipeline
143  ----------------
144 
145  The basic idea of this module is the classic FIRST robotics vision pipeline: first, select a range of pixels in HSV
146  color pixel space likely to include the object. Then, detect contours of all blobs in range. Then apply some tests
147  on the shape of the detected blobs, their size, fill ratio (ratio of object area compared to its convex hull's
148  area), etc. Finally, estimate the location and pose of the object in the world.
149 
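 As a standalone sketch, one such pipeline boils down to just a few OpenCV calls (this is an
 illustration with arbitrary HSV bounds and a simplified contour mode, not this module's exact code;
 see detect() further below for the real implementation):

 \code{.cpp}
 #include <opencv2/imgproc/imgproc.hpp>
 #include <vector>

 std::vector<std::vector<cv::Point>> detectBlobs(cv::Mat const & bgr)
 {
   // Select pixels within a fixed HSV window (here, around hue 45, a light green):
   cv::Mat hsv; cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);
   cv::Mat mask; cv::inRange(hsv, cv::Scalar(15, 30, 180), cv::Scalar(75, 255, 255), mask);

   // Morphological cleanup, as controlled by erodesize and dilatesize in this module:
   cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2, 2)));
   cv::dilate(mask, mask, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(4, 4)));

   // Extract blob outlines; shape, size, and fill-ratio tests then filter these contours:
   std::vector<std::vector<cv::Point>> contours;
   cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
   return contours;
 }
 \endcode
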
150  In this module, we run up to 4 pipelines in parallel, using different settings for the range of HSV pixels
151  considered:
152 
153  - Pipeline 0 uses the HSV values provided by user parameters;
154  - Pipeline 1 broadens that fixed range a bit;
155  - Pipelines 2-3 use a narrow and a broader HSV window, respectively, learned and adapted over time.
156 
157  Detections from all 4 pipelines are considered for overlap and quality (raggedness of their outlines), and only the
158  cleanest of several overlapping detections is preserved. From those cleanest detections, pipelines 2-3 learn and
159  adapt the HSV range for future video frames.
160 
161  Using this module
162  -----------------
163 
164  Check out [this tutorial](http://jevois.org/tutorials/UserFirstVision.html).
165 
166  Detection and quality control steps
167  -----------------------------------
168 
169  The following messages appear for each of the 4 pipelines, at the bottom of the demo video, to help users figure out
170  why their object may not be detected:
171 
172  - T0 to T3: thread (pipeline) number
173  - H=..., S=..., V=...: HSV range considered by that thread
174  - N=...: number of raw blobs detected in that range
175  - Because N blobs are considered in each thread from this point on, only the blob that progressed farthest
176  through the series of tests below is reported. One letter is added each time a test is passed:
177  + H: the convex hull of the blob is quadrilateral (4 vertices)
178  + A: hull area is within range specified by parameter \p hullarea
179  + F: object to hull fill ratio is below the limit set by parameter \p hullfill (i.e., object is not a solid,
180  filled quadrilateral shape)
181  + S: the object has 8 vertices after shape smoothing to eliminate small shape defects (a U shape is
182  indeed expected to have 8 vertices).
183  + E: The shape discrepancy between the original shape and the smoothed shape is acceptable per parameter
184  \p ethresh, i.e., the original contour did not have a lot of defects.
185  + M: the shape is not too close to the borders of the image, per parameter \p margin, i.e., it is unlikely to
186  be truncated as the object partially exits the camera's field of view.
187  + V: Vectors describing the shape as it relates to its convex hull are non-zero, i.e., the centroid of the shape
188  is not exactly coincident with the centroid of its convex hull, as we would expect for a U shape.
189  + U: the shape is roughly upright; upside-down U shapes are rejected as likely spurious.
190  + OK: this thread detected at least one shape that passed all the tests.
191 
192  The black and white picture at right shows the pixels in HSV range for the thread determined by parameter \p
193  showthread (with value 0 by default).
194 
195  Serial Messages
196  ---------------
197 
198  This module can send standardized serial messages as described in \ref UserSerialStyle. One message is issued on
199  every video frame for each detected and good object. The \p id field in the messages is simply \b FIRST for all
200  messages.
201 
202  When \p dopose is turned on, 3D messages will be sent, otherwise 2D messages.
203 
204  2D messages when \p dopose is off:
205 
206  - Serial message type: \b 2D
207  - `id`: always `FIRST`
208  - `x`, `y`, or vertices: standardized 2D coordinates of object center or corners
209  - `w`, `h`: standardized object size
210  - `extra`: none (empty string)
211 
212  3D messages when \p dopose is on:
213 
214  - Serial message type: \b 3D
215  - `id`: always `FIRST`
216  - `x`, `y`, `z`, or vertices: 3D coordinates in millimeters of object center, or corners
217  - `w`, `h`, `d`: object size in millimeters; a depth of 1 mm is always used
218  - `extra`: none (empty string)
219 
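 For illustration, assuming the Normal serial style (where each 2D message arrives as one line of
 the form "N2 id x y w h", per \ref UserSerialStyle), a receiving program might parse messages from
 this module as sketched below (this parser is not part of the module):

 \code{.cpp}
 #include <cstdio>
 #include <cstring>

 // Parse one "N2 id x y w h" line; x and y are standardized coordinates in [-1000 .. 1000]
 // with (0, 0) at the image center (see \ref coordhelpers):
 bool parseNormal2D(char const * line, float & x, float & y, float & w, float & h)
 {
   char id[32];
   if (std::sscanf(line, "N2 %31s %f %f %f %f", id, &x, &y, &w, &h) != 5) return false;
   return std::strcmp(id, "FIRST") == 0; // this module always reports id FIRST
 }
 \endcode
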
220  NOTE: 3D pose estimation from low-resolution 176x144 images at 120fps can be quite noisy. Make sure you tune your
221  HSV ranges very well if you want to operate at 120fps (see below). To operate more reliably at very low resolutions,
222  one may want to improve this module by adding subpixel shape refinement and tracking across frames.
223 
224  See \ref UserSerialStyle for more on standardized serial messages, and \ref coordhelpers for more info on
225  standardized coordinates.
226 
227  Trying it out
228  -------------
229 
230  The default parameter settings (which are set in \b script.cfg explained below) attempt to detect yellow-green
231  objects. Present an object to the JeVois camera and see whether it is detected. When detected and good
232  enough according to a number of quality control tests, the outline of the object is drawn.
233 
234  For further use of this module, you may want to check out the following tutorials:
235 
236  - [Using the sample FIRST Robotics vision module](http://jevois.org/tutorials/UserFirstVision.html)
237  - [Tuning the color-based object tracker using a python graphical
238  interface](http://jevois.org/tutorials/UserColorTracking.html)
239  - [Making a motorized pan-tilt head for JeVois and tracking
240  objects](http://jevois.org/tutorials/UserPanTilt.html)
241  - \ref ArduinoTutorial
242 
243  Tuning
244  ------
245 
246  You need to provide the exact width and height of your physical shape to parameter \p objsize for this module to
247  work. It will look for a shape of that physical size (though at any distance and orientation from the camera). Be
248  sure you edit \b script.cfg and set the parameter \p objsize in there to the true measured physical size of your
249  shape.
250 
251  You should adjust parameters \p hcue, \p scue, and \p vcue to isolate the range of Hue, Saturation, and Value
252  (respectively) that correspond to the objects you want to detect. Note that there is a \b script.cfg file in this
253  module's directory that provides a range tuned to a light yellow-green object, as shown in the demo screenshot.
254 
255  Tuning the parameters is best done interactively by connecting to your JeVois camera while it is looking at some
256  object of the desired color. Once you have achieved a tuning, you may want to set the hcue, scue, and vcue
257  parameters in your \b script.cfg file for this module on the microSD card (see below).
258 
259  Typically, you would start by narrowing down on the hue, then the value, and finally the saturation. Make sure you
260  also move your camera around and show it typical background clutter to check for false positives (detections of
261  things you are not interested in, which can happen if your ranges are too wide).
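
 For example, over a serial connection to the camera, a tuning session could include commands like
 these (illustrative values only; your object will require different ones):

 \verbatim
 # Move the hue window toward yellow:
 setpar hcue 30
 # Raise the saturation lower bound to reject washed-out colors:
 setpar scue 100
 # Lower the value bound if your object is not very bright:
 setpar vcue 150
 # Draw all candidate contours while tuning:
 setpar debug true
 \endverbatim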
262 
263  Config file
264  -----------
265 
266  JeVois allows you to store parameter settings and commands in a file named \b script.cfg stored in the directory of
267  a module. The file \b script.cfg may contain any sequence of commands as you would type them interactively in the
268  JeVois command-line interface. For the \jvmod{FirstVision} module, a default script is provided that sets the camera
269  to manual color, gain, and exposure mode (for more reliable color values), and other example parameter values.
270 
271  The \b script.cfg file for \jvmod{FirstVision} is stored on your microSD at
272  <b>JEVOIS:/modules/JeVois/FirstVision/script.cfg</b>
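
 For instance, a \b script.cfg for this module could look like the sketch below (the camera control
 names and all values here are illustrative; consult the actual file shipped with the module):

 \verbatim
 # Set camera to manual gain and exposure so that HSV values remain stable:
 setcam autogain 0
 setcam autoexp 1
 setcam gain 16
 setcam absexp 500

 # Initial HSV cues for a light yellow-green object:
 setpar hcue 45
 setpar scue 50
 setpar vcue 200
 \endverbatim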
273 
274  @author Laurent Itti
275 
276  @videomapping YUYV 176 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
277  @videomapping YUYV 352 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
278  @videomapping YUYV 320 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
279  @videomapping YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
280  @videomapping NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstVision
281  @videomapping NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstVision
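 (In each mapping above, the first four values are the USB output pixel format, width, height, and
 frame rate, and the next four are the camera format, width, height, and frame rate. For the
 mappings with video output, the output is 50 pixels taller than the camera frame, leaving room
 for the status messages drawn under the video.)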
282  @email itti\@usc.edu
283  @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
284  @copyright Copyright (C) 2017 by Laurent Itti, iLab and the University of Southern California
285  @mainurl http://jevois.org
286  @supporturl http://jevois.org/doc
287  @otherurl http://iLab.usc.edu
288  @license GPL v3
289  @distribution Unrestricted
290  @restrictions None
291  \ingroup modules */
292 class FirstVision : public jevois::StdModule,
293  public jevois::Parameter<hcue, scue, vcue, maxnumobj, hullarea, hullfill, erodesize,
294  dilatesize, epsilon, debug, threads, showthread, ethresh,
295  dopose, camparams, iou, objsize, margin>
296 {
297  protected:
298  cv::Mat itsCamMatrix; //!< Our camera matrix
299  cv::Mat itsDistCoeffs; //!< Our camera distortion coefficients
300  bool itsCueChanged = true; //!< True when users change ranges
301 
302  void onParamChange(hcue const & /*param*/, unsigned char const & /*newval*/) override { itsCueChanged = true; }
303  void onParamChange(scue const & /*param*/, unsigned char const & /*newval*/) override { itsCueChanged = true; }
304  void onParamChange(vcue const & /*param*/, unsigned char const & /*newval*/) override { itsCueChanged = true; }
305 
306  // ####################################################################################################
307  //! Helper struct for an HSV range triplet, where each range is specified as a mean and sigma:
308  /*! Note that sigma is used differently for H, S, and V, under the assumption that we want to track a bright target:
309  For H, the range is [mean-sigma .. mean+sigma]. For S and V, the range is [mean-sigma .. 255]. See rmin() and
310  rmax() for details. */
311  struct hsvcue
312  {
313  //! Constructor
314  hsvcue(unsigned char h, unsigned char s, unsigned char v) : muh(h), sih(30), mus(s), sis(20), muv(v), siv(20)
315  { fix(); }
316 
317  //! Constructor
318  hsvcue(unsigned char h, unsigned char hsig, unsigned char s, unsigned char ssig,
319  unsigned char v, unsigned char vsig) : muh(h), sih(hsig), mus(s), sis(ssig), muv(v), siv(vsig)
320  { fix(); }
321 
322  //! Fix ranges so they don't go out of bounds
323  void fix()
324  {
325  muh = std::min(179.0F, std::max(1.0F, muh)); sih = std::max(1.0F, std::min(sih, 360.0F));
326  mus = std::min(254.0F, std::max(1.0F, mus)); sis = std::max(1.0F, std::min(sis, 512.0F));
327  muv = std::min(254.0F, std::max(1.0F, muv)); siv = std::max(1.0F, std::min(siv, 512.0F));
328  }
329 
330  //! Get minimum triplet for use by cv::inRange()
331  cv::Scalar rmin() const
332  { return cv::Scalar(std::max(0.0F, muh - sih), std::max(0.0F, mus - sis), std::max(0.0F, muv - siv)); }
333 
334  //! Get maximum triplet for use by cv::inRange()
335  cv::Scalar rmax() const
336  { return cv::Scalar(std::min(179.0F, muh + sih), 255, 255); }
337 
338  float muh, sih; //!< Mean and sigma for H
339  float mus, sis; //!< Mean and sigma for S
340  float muv, siv; //!< Mean and sigma for V
341  };
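  // For example (derived from the code above): hsvcue cue(45, 50, 200) gets default sigmas 30, 20, 20,
  // so cue.rmin() is (15, 30, 180) and cue.rmax() is (75, 255, 255); H spans mean +/- sigma, while
  // S and V extend from mean - sigma all the way up to 255.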
342 
343  std::vector<hsvcue> itsHSV;
344 
345  // ####################################################################################################
346  //! Helper struct for a detected object
347  struct detection
348  {
349  std::vector<cv::Point> contour; //!< The full detailed contour
350  std::vector<cv::Point> approx; //!< Smoothed approximation of the contour
351  std::vector<cv::Point> hull; //!< Convex hull of the contour
352  size_t threadnum; //!< Thread number that detected this object
353  float serr; //!< Shape error score (higher for rougher contours with defects)
354  };
355 
356  //! Our detections, combined across all threads
357  std::vector<detection> itsDetections;
358  std::mutex itsDetMtx;
359 
360  //! Kalman filters to learn and adapt HSV windows over time
361  std::shared_ptr<Kalman1D> itsKalH, itsKalS, itsKalV;
362 
363  //! Erosion and dilation kernels shared across all detect threads
364  cv::Mat itsErodeElement, itsDilateElement;
365 
366  // ####################################################################################################
367  //! ParallelLoopBody class for parallelized single-marker pose estimation
368  /*! Derived from the opencv_contrib ArUco module; it is just a simple solvePnP call inside. */
369  class SinglePoseEstimationParallel : public cv::ParallelLoopBody
370  {
371  public:
372  SinglePoseEstimationParallel(cv::Mat & _objPoints, cv::InputArrayOfArrays _corners,
373  cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
374  cv::Mat & _rvecs, cv::Mat & _tvecs) :
375  objPoints(_objPoints), corners(_corners), cameraMatrix(_cameraMatrix),
376  distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs)
377  { }
378 
379  void operator()(cv::Range const & range) const
380  {
381  int const begin = range.start;
382  int const end = range.end;
383 
384  for (int i = begin; i < end; ++i)
385  cv::solvePnP(objPoints, corners.getMat(i), cameraMatrix, distCoeffs,
386  rvecs.at<cv::Vec3d>(i), tvecs.at<cv::Vec3d>(i));
387  }
388 
389  private:
390  cv::Mat & objPoints;
391  cv::InputArrayOfArrays corners;
392  cv::InputArray cameraMatrix, distCoeffs;
393  cv::Mat & rvecs, & tvecs; // & on both declarators, so both members alias the caller's Mats
394  };
395 
396  // ####################################################################################################
397  // ####################################################################################################
398  // ####################################################################################################
399 
400  public:
401  // ####################################################################################################
402  //! Constructor
403  FirstVision(std::string const & instance) : jevois::StdModule(instance)
404  {
405  itsKalH = addSubComponent<Kalman1D>("kalH");
406  itsKalS = addSubComponent<Kalman1D>("kalS");
407  itsKalV = addSubComponent<Kalman1D>("kalV");
408  }
409 
410  // ####################################################################################################
411  //! Virtual destructor for safe inheritance
412  virtual ~FirstVision() { }
413 
414  // ####################################################################################################
415  //! Estimate 6D pose of detected objects, if dopose parameter is true, otherwise just 2D corners
416  /*! Inspired from the ArUco module of opencv_contrib
417  The corners array is always filled, but rvecs and tvecs only are if dopose is true */
418  void estimatePose(std::vector<std::vector<cv::Point2f> > & corners, cv::OutputArray _rvecs,
419  cv::OutputArray _tvecs)
420  {
421  auto const osiz = objsize::get();
422 
423  // Get a vector of all our corners so we can map them to 3D and draw them:
424  corners.clear();
425  for (detection const & d : itsDetections)
426  {
427  corners.push_back(std::vector<cv::Point2f>());
428  std::vector<cv::Point2f> & v = corners.back();
429  for (auto const & p : d.hull) v.push_back(cv::Point2f(p));
430  }
431 
432  if (dopose::get())
433  {
434  // set coordinate system in the middle of the object, with Z pointing out
435  cv::Mat objPoints(4, 1, CV_32FC3);
436  objPoints.ptr< cv::Vec3f >(0)[0] = cv::Vec3f(-osiz.width * 0.5F, -osiz.height * 0.5F, 0);
437  objPoints.ptr< cv::Vec3f >(0)[1] = cv::Vec3f(-osiz.width * 0.5F, osiz.height * 0.5F, 0);
438  objPoints.ptr< cv::Vec3f >(0)[2] = cv::Vec3f(osiz.width * 0.5F, osiz.height * 0.5F, 0);
439  objPoints.ptr< cv::Vec3f >(0)[3] = cv::Vec3f(osiz.width * 0.5F, -osiz.height * 0.5F, 0);
440 
441  int nobj = (int)corners.size();
442  _rvecs.create(nobj, 1, CV_64FC3); _tvecs.create(nobj, 1, CV_64FC3);
443  cv::Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
444  cv::parallel_for_(cv::Range(0, nobj), SinglePoseEstimationParallel(objPoints, corners, itsCamMatrix,
445  itsDistCoeffs, rvecs, tvecs));
446  }
447  }
448 
449  // ####################################################################################################
450  //! Load camera calibration parameters
451  void loadCameraCalibration(unsigned int w, unsigned int h)
452  {
453  camparams::freeze();
454 
455  std::string const cpf = std::string(JEVOIS_SHARE_PATH) + "/camera/" + camparams::get() +
456  std::to_string(w) + 'x' + std::to_string(h) + ".yaml";
457 
458  cv::FileStorage fs(cpf, cv::FileStorage::READ);
459  if (fs.isOpened())
460  {
461  fs["camera_matrix"] >> itsCamMatrix;
462  fs["distortion_coefficients"] >> itsDistCoeffs;
463  LINFO("Loaded camera calibration from " << cpf);
464  }
465  else
466  {
467  LERROR("Failed to read camera parameters from file [" << cpf << "] -- IGNORED");
468  itsCamMatrix = cv::Mat::eye(3, 3, CV_64F);
469  itsDistCoeffs = cv::Mat::zeros(5, 1, CV_64F);
470  }
471  }
472 
473  // ####################################################################################################
474  //! HSV object detector; we run several of these in parallel with different hsvcue settings
475  void detect(cv::Mat const & imghsv, size_t tnum, int dispx = 3, int dispy = 242, jevois::RawImage *outimg = nullptr)
476  {
477  // Threshold the HSV image to only keep pixels within the desired HSV range:
478  cv::Mat imgth;
479  hsvcue const & hsv = itsHSV[tnum]; cv::Scalar const rmin = hsv.rmin(), rmax = hsv.rmax();
480  cv::inRange(imghsv, rmin, rmax, imgth);
481  std::string str = jevois::sformat("T%zu: H=%03d-%03d S=%03d-%03d V=%03d-%03d ", tnum, int(rmin.val[0]),
482  int(rmax.val[0]), int(rmin.val[1]), int(rmax.val[1]),
483  int(rmin.val[2]), int(rmax.val[2]));
484 
485  // Apply morphological operations to cleanup the image noise:
486  if (itsErodeElement.empty() == false) cv::erode(imgth, imgth, itsErodeElement);
487  if (itsDilateElement.empty() == false) cv::dilate(imgth, imgth, itsDilateElement);
488 
489  // Detect objects by finding contours:
490  std::vector<std::vector<cv::Point> > contours; std::vector<cv::Vec4i> hierarchy;
491  cv::findContours(imgth, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
492  str += jevois::sformat("N=%03zu ", hierarchy.size());
493 
494  double const epsi = epsilon::get();
495  int const m = margin::get();
496 
497  // Identify the "good" objects:
498  std::string str2, beststr2;
499  if (hierarchy.size() > 0 && hierarchy.size() <= maxnumobj::get())
500  {
501  for (int index = 0; index >= 0; index = hierarchy[index][0])
502  {
503  // Keep track of our best detection so far:
504  if (str2.length() > beststr2.length()) beststr2 = str2;
505  str2.clear();
506 
507  // Let's examine this contour:
508  std::vector<cv::Point> const & c = contours[index];
509  detection d;
510 
511  // Compute contour area:
512  double const area = cv::contourArea(c, false);
513 
514  // Compute convex hull:
515  std::vector<cv::Point> rawhull;
516  cv::convexHull(c, rawhull, true);
517  double const rawhullperi = cv::arcLength(rawhull, true);
518  cv::approxPolyDP(rawhull, d.hull, epsi * rawhullperi * 3.0, true);
519 
520  // Is it the right shape?
521  if (d.hull.size() != 4) continue; // 4 vertices for the rectangular convex outline (shows as a trapezoid)
522  str2 += "H"; // Hull is quadrilateral
523 
524  double const huarea = cv::contourArea(d.hull, false);
525  if ( ! hullarea::get().contains(int(huarea + 0.4999))) continue;
526  str2 += "A"; // Hull area ok
527 
528  int const hufill = int(area / huarea * 100.0 + 0.4999);
529  if (hufill > hullfill::get()) continue;
530  str2 += "F"; // Fill is ok
531 
532  // Check object shape:
533  double const peri = cv::arcLength(c, true);
534  cv::approxPolyDP(c, d.approx, epsi * peri, true);
535  if (d.approx.size() < 7 || d.approx.size() > 9) continue; // 8 vertices for a U shape
536  str2 += "S"; // Shape is ok
537 
538  // Compute contour serr:
539  d.serr = 100.0 * cv::matchShapes(c, d.approx, cv::CONTOURS_MATCH_I1, 0.0);
540  if (d.serr > ethresh::get()) continue;
541  str2 += "E"; // Shape error is ok
542 
543  // Reject the shape if any of its vertices gets within the margin of the image bounds. This is to avoid
544  // getting grossly incorrect 6D pose estimates as the shape starts getting truncated as it partially exits the
545  // camera field of view:
546  bool reject = false;
547  for (size_t i = 0; i < c.size(); ++i)
548  if (c[i].x < m || c[i].x >= imghsv.cols - m || c[i].y < m || c[i].y >= imghsv.rows - m)
549  { reject = true; break; }
550  if (reject) continue;
551  str2 += "M"; // Margin ok
552 
553  // Re-order the 4 points in the hull if needed: In the pose estimation code, we will assume vertices ordered
554  // as follows:
555  //
556  // 0| |3
557  // | |
558  // | |
559  // 1----------2
560 
561  // v10+v23 should point outward of the U more than v03+v12 does:
562  std::complex<float> v10p23(float(d.hull[0].x - d.hull[1].x + d.hull[3].x - d.hull[2].x),
563  float(d.hull[0].y - d.hull[1].y + d.hull[3].y - d.hull[2].y));
564  float const len10p23 = std::abs(v10p23);
565  std::complex<float> v03p12(float(d.hull[3].x - d.hull[0].x + d.hull[2].x - d.hull[1].x),
566  float(d.hull[3].y - d.hull[0].y + d.hull[2].y - d.hull[1].y));
567  float const len03p12 = std::abs(v03p12);
568 
569  // Vector from centroid of U shape to centroid of its hull should also point outward of the U:
570  cv::Moments const momC = cv::moments(c);
571  cv::Moments const momH = cv::moments(d.hull);
572  std::complex<float> vCH(momH.m10 / momH.m00 - momC.m10 / momC.m00, momH.m01 / momH.m00 - momC.m01 / momC.m00);
573  float const lenCH = std::abs(vCH);
574 
575  if (len10p23 < 0.1F || len03p12 < 0.1F || lenCH < 0.1F) continue;
576  str2 += "V"; // Shape vectors ok
577 
578  float const good = (v10p23.real() * vCH.real() + v10p23.imag() * vCH.imag()) / (len10p23 * lenCH);
579  float const bad = (v03p12.real() * vCH.real() + v03p12.imag() * vCH.imag()) / (len03p12 * lenCH);
580 
581  // We reject upside-down detections as those are likely to be spurious:
582  if (vCH.imag() >= -2.0F) continue;
583  str2 += "U"; // U shape is upright
584 
585  // Fixup the ordering of the vertices if needed:
586  if (bad > good) { d.hull.insert(d.hull.begin(), d.hull.back()); d.hull.pop_back(); }
587 
588  // This detection is a keeper:
589  str2 += " OK";
590  d.contour = c;
591  std::lock_guard<std::mutex> _(itsDetMtx);
592  itsDetections.push_back(d);
593  }
594  if (str2.length() > beststr2.length()) beststr2 = str2;
595  }
596 
597  // Display any results requested by the users:
598  if (outimg && outimg->valid())
599  {
600  if (tnum == showthread::get() && int(outimg->width) == 2 * imgth.cols)
601  jevois::rawimage::pasteGreyToYUYV(imgth, *outimg, imgth.cols, 0);
602  jevois::rawimage::writeText(*outimg, str + beststr2, dispx, dispy + 12*tnum, jevois::yuyv::White);
603  }
604  }
605 
606  // ####################################################################################################
607  //! Initialize (e.g., if user changes cue params) or update our HSV detection ranges
608  void updateHSV(size_t nthreads)
609  {
610  float const spread = 0.2F;
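  // With spread = 0.2, thread i scales the user-provided sigmas by (1 + 0.2 * i), i.e., 1.0x, 1.2x,
  // 1.4x, 1.6x for threads 0-3: thread 0 tracks the user cues exactly, higher threads search wider.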
611 
612  if (itsHSV.empty() || itsCueChanged)
613  {
614  // Initialize or reset because of user parameter change:
615  itsHSV.clear(); itsCueChanged = false;
616  for (size_t i = 0; i < nthreads; ++i)
617  {
618  hsvcue cue(hcue::get(), scue::get(), vcue::get());
619  cue.sih *= (1.0F + spread * i); cue.sis *= (1.0F + spread * i); cue.siv *= (1.0F + spread * i);
620  cue.fix();
621  itsHSV.push_back(cue);
622  }
623  if (nthreads > 2)
624  {
625  itsKalH->set(hcue::get()); itsKalH->get();
626  itsKalS->set(scue::get()); itsKalS->get();
627  itsKalV->set(vcue::get()); itsKalV->get();
628  }
629  }
630  else
631  {
632  // Kalman update:
633  if (nthreads > 2)
634  {
635  itsHSV[2].muh = itsKalH->get();
636  itsHSV[2].mus = itsKalS->get();
637  itsHSV[2].muv = itsKalV->get();
638  itsHSV[2].fix();
639  for (size_t i = 3; i < itsHSV.size(); ++i)
640  {
641  itsHSV[i] = itsHSV[2];
642  itsHSV[i].sih *= (1.0F + spread * i);
643  itsHSV[i].sis *= (1.0F + spread * i);
644  itsHSV[i].siv *= (1.0F + spread * i);
645  itsHSV[i].fix();
646  }
647  }
648  }
649  }
650 
651  // ####################################################################################################
652  //! Clean up the detections by eliminating duplicates:
653  void cleanupDetections()
654  {
655  bool keepgoing = true;
656  double const iouth = iou::get();
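  // Overlap is scored as intersection-over-union: the union is approximated by the area of the
  // convex hull of both objects' hull points (see FIXME below), and the intersection is then
  // recovered as area(i) + area(j) - union.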
657 
658  while (keepgoing)
659  {
660  // We will stop if we do not eliminate any more objects:
661  keepgoing = false; int delidx = -1;
662 
663  // Loop over all pairs of objects:
664  size_t const siz = itsDetections.size();
665  for (size_t i = 0; i < siz; ++i)
666  {
667  for (size_t j = 0; j < i; ++j)
668  {
669  std::vector<cv::Point> pts = itsDetections[i].hull;
670  for (cv::Point const & p : itsDetections[j].hull) pts.push_back(p);
671  std::vector<cv::Point> hull;
672  cv::convexHull(pts, hull); // FIXME should do a true union! this is just an approximation to it
673  double uarea = cv::contourArea(hull);
674  double iarea = cv::contourArea(itsDetections[i].hull) + cv::contourArea(itsDetections[j].hull) - uarea;
675 
676  // note: object detection code guarantees non-zero area:
677  double const inoun = iarea / uarea;
678  if (inoun >= iouth)
679  {
680  if (itsDetections[i].serr > itsDetections[j].serr) delidx = j; else delidx = i;
681  break;
682  }
683  }
684  if (delidx != -1) break;
685  }
686  if (delidx != -1) { itsDetections.erase(itsDetections.begin() + delidx); keepgoing = true; }
687  }
688  }
689 
690  // ####################################################################################################
691  //! Learn and update our HSV ranges
692  void learnHSV(size_t nthreads, cv::Mat const & imgbgr, jevois::RawImage *outimg = nullptr)
693  {
694  int const w = imgbgr.cols, h = imgbgr.rows;
695 
696  // Compute the median filtered BGR image in a thread:
697  cv::Mat medimgbgr;
698  auto median_fut = jevois::async([&](){ cv::medianBlur(imgbgr, medimgbgr, 3); } );
699 
700  // Get all the cleaned-up contours:
701  std::vector<std::vector<cv::Point> > contours;
702  for (detection const & d : itsDetections) contours.push_back(d.contour);
703 
704  // If desired, draw all contours:
705  std::future<void> drawc_fut;
706  if (debug::get() && outimg && outimg->valid())
707  drawc_fut = jevois::async([&]() {
708  // We reinterpret the top portion of our YUYV output image as an opencv 8UC2 image:
709  cv::Mat outuc2(outimg->height, outimg->width, CV_8UC2, outimg->pixelsw<unsigned char>());
710  cv::drawContours(outuc2, contours, -1, jevois::yuyv::LightPink, 2);
711  } );
712 
713  // Draw all the filled contours into a binary mask image:
714  cv::Mat mask(h, w, CV_8UC1, (unsigned char)0);
715  cv::drawContours(mask, contours, -1, 255, -1); // last -1 is for filled
716 
717  // Wait until median filter is done:
718  median_fut.get();
719 
720  // Compute mean and std BGR values inside objects:
721  cv::Mat mean, std;
722  cv::meanStdDev(medimgbgr, mean, std, mask);
723 
724  // Convert to HSV:
725  cv::Mat bgrmean(2, 1, CV_8UC3); bgrmean.at<cv::Vec3b>(0, 0) = mean; bgrmean.at<cv::Vec3b>(1, 0) = std;
726  cv::Mat hsvmean; cv::cvtColor(bgrmean, hsvmean, cv::COLOR_BGR2HSV);
727 
728  cv::Vec3b hsv = hsvmean.at<cv::Vec3b>(0, 0);
729  int H = hsv.val[0], S = hsv.val[1], V = hsv.val[2];
730 
731  cv::Vec3b sighsv = hsvmean.at<cv::Vec3b>(1, 0);
732  int sH = sighsv.val[0], sS = sighsv.val[1], sV = sighsv.val[2];
733 
734  // Set the new measurements:
735  itsKalH->set(H); itsKalS->set(S); itsKalV->set(V);
736 
737  if (nthreads > 2)
738  {
739  float const eta = 0.4F;
740  itsHSV[2].sih = (1.0F - eta) * itsHSV[2].sih + eta * sH;
741  itsHSV[2].sis = (1.0F - eta) * itsHSV[2].sis + eta * sS;
742  itsHSV[2].siv = (1.0F - eta) * itsHSV[2].siv + eta * sV;
743  itsHSV[2].fix();
744  }
745 
746  // note: drawc_fut may block us here until it is complete.
747  }
748 
749  // ####################################################################################################
750  //! Send serial messages about each detection:
751  void sendAllSerial(int w, int h, std::vector<std::vector<cv::Point2f> > const & corners,
752  std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
753  {
754  if (rvecs.empty() == false)
755  {
756  // If we have rvecs and tvecs, we are doing 3D pose estimation, so send a 3D message:
757  auto const osiz = objsize::get();
758  for (size_t i = 0; i < corners.size(); ++i)
759  {
760  cv::Vec3d const & rv = rvecs[i];
761  cv::Vec3d const & tv = tvecs[i];
762 
763  // Compute quaternion:
764  float const theta = std::sqrt(rv[0] * rv[0] + rv[1] * rv[1] + rv[2] * rv[2]);
765  Eigen::Vector3f axis(rv[0], rv[1], rv[2]); if (theta > 0.0F) axis /= theta; // AngleAxis needs a unit axis
766  Eigen::Quaternion<float> q(Eigen::AngleAxis<float>(theta, axis));
767 
768  sendSerialStd3D(tv[0], tv[1], tv[2], // position
769  osiz.width, osiz.height, 1.0F, // size
770  q.w(), q.x(), q.y(), q.z(), // pose
771  "FIRST"); // FIRST robotics shape
772  }
773  }
774  else
775  {
776  // Send one 2D message per object:
777  for (size_t i = 0; i < corners.size(); ++i)
778  sendSerialContour2D(w, h, corners[i], "FIRST");
779  }
780  }
781 
782  // ####################################################################################################
783  //! Update the morphology structuring elements if needed
784  void updateStructuringElements()
785  {
786  int e = erodesize::get();
787  if (e != itsErodeElement.cols)
788  {
789  if (e) itsErodeElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(e, e));
790  else itsErodeElement.release();
791  }
792 
793  int d = dilatesize::get();
794  if (d != itsDilateElement.cols)
795  {
796  if (d) itsDilateElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(d, d));
797  else itsDilateElement.release();
798  }
799  }
800 
801  // ####################################################################################################
802  //! Processing function, no USB video output
803  virtual void process(jevois::InputFrame && inframe) override
804  {
805  static jevois::Timer timer("processing");
806 
807  // Wait for next available camera image. Any resolution ok:
808  jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
809 
810  timer.start();
811 
812  // Load camera calibration if needed:
813  if (itsCamMatrix.empty()) loadCameraCalibration(w, h);
814 
815  // Convert input image to BGR24, then to HSV:
816  cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
817  cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
818  size_t const nthreads = threads::get();
819 
820  // Make sure our HSV range parameters are up to date:
821  updateHSV(nthreads);
822 
823  // Clear any old detections and get ready to parallelize the detection work:
824  itsDetections.clear();
825  updateStructuringElements();
826 
827  // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
828  std::vector<std::future<void> > dfut;
829  for (size_t i = 0; i < nthreads - 1; ++i)
830  dfut.push_back(jevois::async([&](size_t tn) { detect(imghsv, tn, 3, h+2); }, i));
831  detect(imghsv, nthreads - 1, 3, h+2);
832 
833  // Wait for all threads to complete:
834  for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
835 
836  // Let camera know we are done processing the input image:
837  inframe.done();
838 
839  // Clean up the detections by eliminating duplicates:
840  cleanupDetections();
841 
842  // Learn the object's HSV value over time:
843  auto learn_fut = jevois::async([&]() { learnHSV(nthreads, imgbgr); });
844 
845  // Map to 6D (inverse perspective):
846  std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
847  estimatePose(corners, rvecs, tvecs);
848 
849  // Send all serial messages:
850  sendAllSerial(w, h, corners, rvecs, tvecs);
851 
852  // Wait for all threads:
853  try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
854 
855  // Show processing fps:
856  timer.stop();
857  }
858 
859  // ####################################################################################################
860  //! Processing function, with USB video output
861  virtual void process(jevois::InputFrame && inframe, jevois::OutputFrame && outframe) override
862  {
863  static jevois::Timer timer("processing");
864 
865  // Wait for next available camera image. Any resolution ok, but require YUYV since we assume it for drawings:
866  jevois::RawImage inimg = inframe.get(); unsigned int const w = inimg.width, h = inimg.height;
867  inimg.require("input", w, h, V4L2_PIX_FMT_YUYV);
868 
869  timer.start();
870 
871  // Load camera calibration if needed:
872  if (itsCamMatrix.empty()) loadCameraCalibration(w, h);
873 
874  // While we process it, start a thread to wait for output frame and paste the input image into it:
875  jevois::RawImage outimg; // main thread should not use outimg until paste thread is complete
876  auto paste_fut = jevois::async([&]() {
877  outimg = outframe.get();
878  outimg.require("output", outimg.width, h + 50, inimg.fmt);
879  if (outimg.width != w && outimg.width != w * 2) LFATAL("Output image width should be 1x or 2x input width");
880  jevois::rawimage::paste(inimg, outimg, 0, 0);
881  jevois::rawimage::writeText(outimg, "JeVois FIRST Vision", 3, 3, jevois::yuyv::White);
882  jevois::rawimage::drawFilledRect(outimg, 0, h, outimg.width, outimg.height-h, jevois::yuyv::Black);
883  });
884 
885  // Convert input image to BGR24, then to HSV:
886  cv::Mat imgbgr = jevois::rawimage::convertToCvBGR(inimg);
887  cv::Mat imghsv; cv::cvtColor(imgbgr, imghsv, cv::COLOR_BGR2HSV);
888  size_t const nthreads = threads::get();
889 
890  // Make sure our HSV range parameters are up to date:
891  updateHSV(nthreads);
892 
893  // Clear any old detections and get ready to parallelize the detection work:
894  itsDetections.clear();
895  updateStructuringElements();
896 
897  // Launch our workers: run nthreads-1 new threads, and last worker in our current thread:
898  std::vector<std::future<void> > dfut;
899  for (size_t i = 0; i < nthreads - 1; ++i)
900  dfut.push_back(jevois::async([&](size_t tn) { detect(imghsv, tn, 3, h+2, &outimg); }, i));
901  detect(imghsv, nthreads - 1, 3, h+2, &outimg);
902 
903  // Wait for all threads to complete:
904  for (auto & f : dfut) try { f.get(); } catch (...) { jevois::warnAndIgnoreException(); }
905 
906  // Wait for paste to finish up:
907  paste_fut.get();
908 
909  // Let camera know we are done processing the input image:
910  inframe.done();
911 
912  // Clean up the detections by eliminating duplicates:
913  cleanupDetections();
914 
915  // Learn the object's HSV value over time:
916  auto learn_fut = jevois::async([&]() { learnHSV(nthreads, imgbgr, &outimg); });
917 
918  // Map to 6D (inverse perspective):
919  std::vector<std::vector<cv::Point2f> > corners; std::vector<cv::Vec3d> rvecs, tvecs;
920  estimatePose(corners, rvecs, tvecs);
921 
922  // Send all serial messages:
923  sendAllSerial(w, h, corners, rvecs, tvecs);
924 
925  // Draw all detections in 3D:
926  drawDetections(outimg, corners, rvecs, tvecs);
927 
928  // Show number of detected objects:
929  jevois::rawimage::writeText(outimg, "Detected " + std::to_string(itsDetections.size()) + " objects.",
930  w + 3, 3, jevois::yuyv::White);
931 
932  // Wait for all threads:
933  try { learn_fut.get(); } catch (...) { jevois::warnAndIgnoreException(); }
934 
935  // Show processing fps:
936  std::string const & fpscpu = timer.stop();
937  jevois::rawimage::writeText(outimg, fpscpu, 3, h - 13, jevois::yuyv::White);
938 
939  // Send the output image with our processing results to the host over USB:
940  outframe.send();
941  }
942 
943  // ####################################################################################################
944  void drawDetections(jevois::RawImage & outimg, std::vector<std::vector<cv::Point2f> > corners,
945  std::vector<cv::Vec3d> const & rvecs, std::vector<cv::Vec3d> const & tvecs)
946  {
947  auto const osiz = objsize::get(); float const w = osiz.width, h = osiz.height;
948  int nobj = int(corners.size());
949 
950  // This code is like drawDetectedMarkers() in cv::aruco, but for YUYV output image:
951  if (rvecs.empty())
952  {
953  // We are not doing 3D pose estimation. Just draw object outlines in 2D:
954  for (int i = 0; i < nobj; ++i)
955  {
956  std::vector<cv::Point2f> const & obj = corners[i];
957 
958  // draw marker sides:
959  for (int j = 0; j < 4; ++j)
960  {
961  cv::Point2f const & p0 = obj[j];
962  cv::Point2f const & p1 = obj[ (j+1) % 4 ];
963  jevois::rawimage::drawLine(outimg, int(p0.x + 0.5F), int(p0.y + 0.5F),
964  int(p1.x + 0.5F), int(p1.y + 0.5F), 1, jevois::yuyv::LightPink);
965  //jevois::rawimage::writeText(outimg, std::to_string(j),
966  // int(p0.x + 0.5F), int(p0.y + 0.5F), jevois::yuyv::White);
967  }
968  }
969  }
970  else
971  {
972  // Show trihedron and parallelepiped centered on object:
973  float const hw = w * 0.5F, hh = h * 0.5F, dd = -0.5F * std::max(w, h);
974 
975  for (int i = 0; i < nobj; ++i)
976  {
977  // Project axis points:
978  std::vector<cv::Point3f> axisPoints;
979  axisPoints.push_back(cv::Point3f(0.0F, 0.0F, 0.0F));
980  axisPoints.push_back(cv::Point3f(hw, 0.0F, 0.0F));
981  axisPoints.push_back(cv::Point3f(0.0F, hh, 0.0F));
982  axisPoints.push_back(cv::Point3f(0.0F, 0.0F, dd));
983 
984  std::vector<cv::Point2f> imagePoints;
985  cv::projectPoints(axisPoints, rvecs[i], tvecs[i], itsCamMatrix, itsDistCoeffs, imagePoints);
986 
987  // Draw axis lines:
988  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
989  int(imagePoints[1].x + 0.5F), int(imagePoints[1].y + 0.5F),
990  2, jevois::yuyv::MedPurple);
991  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
992  int(imagePoints[2].x + 0.5F), int(imagePoints[2].y + 0.5F),
993  2, jevois::yuyv::MedGreen);
994  jevois::rawimage::drawLine(outimg, int(imagePoints[0].x + 0.5F), int(imagePoints[0].y + 0.5F),
995  int(imagePoints[3].x + 0.5F), int(imagePoints[3].y + 0.5F),
996  2, jevois::yuyv::MedGrey);
997 
998  // Also draw a parallelepiped:
999  std::vector<cv::Point3f> cubePoints;
1000  cubePoints.push_back(cv::Point3f(-hw, -hh, 0.0F));
1001  cubePoints.push_back(cv::Point3f(hw, -hh, 0.0F));
1002  cubePoints.push_back(cv::Point3f(hw, hh, 0.0F));
1003  cubePoints.push_back(cv::Point3f(-hw, hh, 0.0F));
1004  cubePoints.push_back(cv::Point3f(-hw, -hh, dd));
1005  cubePoints.push_back(cv::Point3f(hw, -hh, dd));
1006  cubePoints.push_back(cv::Point3f(hw, hh, dd));
1007  cubePoints.push_back(cv::Point3f(-hw, hh, dd));
1008 
1009  std::vector<cv::Point2f> cuf;
1010  cv::projectPoints(cubePoints, rvecs[i], tvecs[i], itsCamMatrix, itsDistCoeffs, cuf);
1011 
1012  // Round all the coordinates:
1013  std::vector<cv::Point> cu;
1014  for (auto const & p : cuf) cu.push_back(cv::Point(int(p.x + 0.5F), int(p.y + 0.5F)));
1015 
1016  // Draw parallelepiped lines:
1017  jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[1].x, cu[1].y, 1, jevois::yuyv::LightGreen);
1018  jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[2].x, cu[2].y, 1, jevois::yuyv::LightGreen);
1019  jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[3].x, cu[3].y, 1, jevois::yuyv::LightGreen);
1020  jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[0].x, cu[0].y, 1, jevois::yuyv::LightGreen);
1021  jevois::rawimage::drawLine(outimg, cu[4].x, cu[4].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
1022  jevois::rawimage::drawLine(outimg, cu[5].x, cu[5].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
1023  jevois::rawimage::drawLine(outimg, cu[6].x, cu[6].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
1024  jevois::rawimage::drawLine(outimg, cu[7].x, cu[7].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
1025  jevois::rawimage::drawLine(outimg, cu[0].x, cu[0].y, cu[4].x, cu[4].y, 1, jevois::yuyv::LightGreen);
1026  jevois::rawimage::drawLine(outimg, cu[1].x, cu[1].y, cu[5].x, cu[5].y, 1, jevois::yuyv::LightGreen);
1027  jevois::rawimage::drawLine(outimg, cu[2].x, cu[2].y, cu[6].x, cu[6].y, 1, jevois::yuyv::LightGreen);
1028  jevois::rawimage::drawLine(outimg, cu[3].x, cu[3].y, cu[7].x, cu[7].y, 1, jevois::yuyv::LightGreen);
1029  }
1030  }
1031  }
1032 };
1033 
1034 // Allow the module to be loaded as a shared object (.so) file:
1035 JEVOIS_REGISTER_MODULE(FirstVision);