JeVoisBase  1.22
JeVois Smart Embedded Machine Vision Toolkit Base Modules
JeVoisIntro.C
Go to the documentation of this file.
1// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2//
3// JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
4// California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
5//
6// This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
7// redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
8// Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10// License for more details. You should have received a copy of the GNU General Public License along with this program;
11// if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12//
13// Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
14// Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
15// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
16/*! \file */
17
18#include <jevois/Core/Module.H>
19
20#include <jevois/Debug/Log.H>
21#include <jevois/Debug/Timer.H>
22#include <jevoisbase/Components/Saliency/Saliency.H>
23#include <jevoisbase/Components/FaceDetection/FaceDetector.H>
24#include <jevoisbase/Components/ObjectRecognition/ObjectRecognitionMNIST.H>
25#include <jevoisbase/Components/Tracking/Kalman2D.H>
26#include <jevoisbase/Components/VideoIO/BufferedVideoReader.H>
27#include <jevois/Image/RawImageOps.H>
28
29#include <opencv2/core/core.hpp>
30#include <opencv2/imgproc/imgproc.hpp>
31#include <linux/videodev2.h> // for v4l2 pixel types
32//#include <opencv2/highgui/highgui.hpp> // used for debugging only, see imshow below
33
34// icon by Freepik in interface at flaticon
35
36struct ScriptItem { char const * msg; int blinkx, blinky; };
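// Each item holds one message to display plus the (x,y) position of an optional blinking marker;
// blinkx == 0 means no marker, and a null msg pointer marks the end of the script (see process() below).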
37static ScriptItem const TheScript[] = {
38 { "Hello! Welcome to this simple demonstration of JeVois", 0, 0 },
39 { "JeVois = camera sensor + quad-core processor + USB video output", 0, 0 },
40 { "This demo is running on the small processor inside JeVois", 0, 0 },
41 { "Neat, isn't it?", 0, 0 },
42 { "", 0, 0 },
43 { "We will help you discover what you see on this screen", 0, 0 },
44 { "We will use this blinking marker to point at things:", 600, 335 },
45 { "", 0, 0 },
46 { "Now a brief tutorial...", 0, 0 },
47 { "", 0, 0 },
48 { "This demo: Attention + Gist + Faces + Objects", 0, 0 },
49 { "Attention: detect things that catch the human eye", 0, 0 },
50 { "Pink square in video above: most interesting (salient) location", 0, 0 },
51 { "Green circle in video above: smoothed attention trajectory", 0, 0 },
52 { "Try it: wave at JeVois, show it some objects, move it around", 0, 0 },
53 { "", 0, 0 },
54 { "Did you catch the attention of JeVois?", 0, 0 },
55 { "", 0, 0 },
56 { "Attention is guided by color contrast, ...", 40, 270 },
57 { "by luminance (intensity) contrast, ...", 120, 270 },
58 { "by oriented edges, ...", 200, 270 },
59 { "by flickering or blinking lights, ...", 280, 270 },
60 { "and by moving objects.", 360, 270 },
61 { "All these visual cues combine into a measure of saliency", 480, 120 },
62 { "or visual interest for every location in view.", 480, 120 },
63 { "", 0, 0 },
64 { "", 0, 0 },
65 { "Gist: statistical summary of a scene, also based on ...", 440, 270 },
66 { "color, intensity, orientation, flicker and motion features.", 0, 0 },
67 { "Gist can be used to recognize places, such as a kitchen or ...", 0, 0 },
68 { "a bathroom, or a road turning left versus turning right.", 0, 0 },
69 { "Try it: point JeVois to different things and see gist change", 440, 270 },
70 { "", 0, 0 },
71 { "", 0, 0 },
72 { "Face detection finds human faces in the camera's view", 612, 316 },
73 { "Try it: point JeVois towards a face. Adjust distance until ...", 612, 316 },
74 { "the face fits inside the attention pink square. When a face ...", 0, 0 },
75 { "is detected, it will appear in the bottom-right corner.", 612, 316 },
76 { "You may have to move a bit farther than arm's length for ...", 0, 0 },
77 { "your face to fit inside the attention pink square.", 0, 0 },
78 { "", 0, 0 },
79 { "", 0, 0 },
80 { "Objects: Here we recognize handwritten digits using ...", 525, 316 },
81 { "deep neural networks. Try it! Draw a number on paper ...", 0, 0 },
82 { "and point JeVois towards it. Adjust distance until the", 0, 0 },
83 { "number fits in the attention pink square.", 0, 0 },
84 { "", 0, 0 },
85 { "Recognized digits are shown near the detected faces.", 525, 316 },
86 { "", 0, 0 },
87 { "If your number is too small, too big, or not upright ...", 0, 0 },
88 { "keep adjusting the distance and angle of the camera.", 0, 0 },
89 { "", 0, 0 },
90 { "Recognition scores for digits 0 to 9 are shown above.", 464, 310 },
91 { "Sometimes the neural network makes mistakes and thinks it ...", 0, 0 },
92 { "found a digit when actually it is looking at something else.", 0, 0 },
93 { "This is still a research issue", 0, 0 },
94 { "but machine vision is improving fast, so stay tuned!", 0, 0 },
95 { "", 0, 0 },
96 { "With JeVois the future of machine vision is in your hands.", 0, 0 },
97 { "", 0, 0 },
98 { "", 0, 0 },
99 { "", 0, 0 },
100 { "This tutorial is now complete. It will restart.", 0, 0 },
101 { "", 0, 0 },
102 { nullptr, 0, 0 }
103};
104
105//! Simple introduction to JeVois and demo that combines saliency, gist, face detection, and object recognition
106/*! This module plays an introduction movie, and then launches the equivalent of the \jvmod{DemoSalGistFaceObj} module,
107 but with some added on-screen text messages that explain what is going on.
108
109 Try it and follow the instructions on screen!
110
111 @author Laurent Itti
112
113 @displayname JeVois Intro
114 @videomapping YUYV 640 360 50.0 YUYV 320 240 50.0 JeVois JeVoisIntro
115 @videomapping YUYV 640 480 50.0 YUYV 320 240 50.0 JeVois JeVoisIntro
116 @email itti\@usc.edu
117 @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
118 @copyright Copyright (C) 2016 by Laurent Itti, iLab and the University of Southern California
119 @mainurl http://jevois.org
120 @supporturl http://jevois.org/doc
121 @otherurl http://iLab.usc.edu
122 @license GPL v3
123 @distribution Unrestricted
124 @restrictions None
125 \ingroup modules */
126class JeVoisIntro : public jevois::StdModule
127{
128 public:
129 //! Constructor
130 JeVoisIntro(std::string const & instance) : jevois::StdModule(instance), itsScoresStr(" ")
131 {
132 itsSaliency = addSubComponent<Saliency>("saliency");
133 itsFaceDetector = addSubComponent<FaceDetector>("facedetect");
134 itsObjectRecognition = addSubComponent<ObjectRecognitionMNIST>("MNIST");
135 itsKF = addSubComponent<Kalman2D>("kalman");
136 itsVideo = addSubComponent<BufferedVideoReader>("intromovie");
137 }
138
139 //! Virtual destructor for safe inheritance
140 virtual ~JeVoisIntro() { }
141
142 //! Initialization once parameters are set:
143 virtual void postInit() override
144 {
145 // Read the banner image and convert to YUYV RawImage:
146 cv::Mat banner_bgr = cv::imread(absolutePath("jevois-banner-notext.png"));
147 itsBanner.width = banner_bgr.cols;
148 itsBanner.height = banner_bgr.rows;
149 itsBanner.fmt = V4L2_PIX_FMT_YUYV;
151 itsBanner.buf.reset(new jevois::VideoBuf(-1, itsBanner.bytesize(), 0, -1));
152 jevois::rawimage::convertCvBGRtoRawImage(banner_bgr, itsBanner, 75);
153
154 // Allow our movie to load a bit:
155 std::this_thread::sleep_for(std::chrono::milliseconds(750));
156 }
157
158 //! Processing function
159 virtual void process(jevois::InputFrame && inframe, jevois::OutputFrame && outframe) override
160 {
161 static jevois::Timer itsProcessingTimer("Processing");
162 static cv::Mat itsLastFace(60, 60, CV_8UC2, 0x80aa); // Note that this one will contain raw YUV pixels
163 static cv::Mat itsLastObject(60, 60, CV_8UC2, 0x80aa); // Note that this one will contain raw YUV pixels
164 static std::string itsLastObjectCateg;
165 static bool doobject = false; // alternate between object and face recognition
166 static bool intromode = false; // intro mode plays a video at the beginning, then shows some info messages
167 static bool intromoviedone = false; // turns true when intro movie complete
168 static ScriptItem const * scriptitem = &TheScript[0];
169 static int scriptframe = 0;
170 unsigned short const txtcol = jevois::yuyv::White;
171
172 // Wait for next available camera image:
173 jevois::RawImage inimg = inframe.get();
174
175 // We only handle one specific input format in this demo:
176 inimg.require("input", 320, 240, V4L2_PIX_FMT_YUYV);
177
178 itsProcessingTimer.start();
179 int const roihw = 32; // face & object roi half width and height
180
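 // Rough layout of the 640-wide output assembled below: camera view at (0,0) and saliency map at (320,0), both
 // 320x240; a strip starting at y=240 with five 80x60 feature maps (color, intensity, orientation, flicker,
 // motion), the gist vector around x=400..480, and 60x60 crops of the last recognized object (x=520) and face
 // (x=580); recognition scores are written near y=301. In intro mode, script messages appear around y=325, and
 // 480-tall outputs additionally show the banner from y=360 down.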
181 // In a thread, wait for an image from our gadget driver into which we will put our results:
182 jevois::RawImage outimg;
183 auto paste_fut =
184 jevois::async([&]()
185 {
186 outimg = outframe.get();
187 outimg.require("output", 640, outimg.height, V4L2_PIX_FMT_YUYV);
188 switch (outimg.height)
189 {
190 case 312: break; // normal mode
191 case 360:
192 case 480: intromode = true; break; // intro mode
193 default: LFATAL("Incorrect output height: should be 312, 360 or 480");
194 }
195
196 // Play the intro movie first if requested:
197 if (intromode && intromoviedone == false)
198 {
199 cv::Mat m = itsVideo->get();
200
201 if (m.empty()) intromoviedone = true;
202 else
203 {
205
206 // Handle bottom of the frame: blank or banner
207 if (outimg.height == 480)
208 jevois::rawimage::paste(itsBanner, outimg, 0, 360);
209 else if (outimg.height > 360)
210 jevois::rawimage::drawFilledRect(outimg, 0, 360, outimg.width, outimg.height - 360, 0x8000);
211
212 // If on a mac with height = 480, need to flip horizontally for photobooth to work (will flip again):
213 if (outimg.height == 480) jevois::rawimage::hFlipYUYV(outimg);
214 }
215 }
216 else
217 {
218 // Paste the original image to the top-left corner of the display:
219 jevois::rawimage::paste(inimg, outimg, 0, 0);
220 jevois::rawimage::writeText(outimg, "JeVois Saliency + Gist + Faces + Objects", 3, 3, txtcol);
221 }
222 });
223
224 // Compute saliency:
225 itsSaliency->process(inimg, true);
226 paste_fut.get();
227 inframe.done();
228 if (intromode && intromoviedone == false) { outframe.send(); return; }
229
230 // find most salient point:
231 int mx, my; intg32 msal;
232 itsSaliency->getSaliencyMax(mx, my, msal);
233
234 // Scale back to original image coordinates:
235 int const smlev = itsSaliency->smscale::get();
236 int const smadj = smlev > 0 ? (1 << (smlev-1)) : 0; // half a saliency map pixel adjustment
237 int const dmx = (mx << smlev) + smadj;
238 int const dmy = (my << smlev) + smadj;
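 // For example, with the saliency map at scale 4 (a 16x downscale, which the *16 display scaling below also
 // assumes), mx = 5 and my = 3 map back to dmx = 5*16 + 8 = 88 and dmy = 3*16 + 8 = 56 in the 320x240 input image.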
239
240 // Compute instantaneous attended ROI (note: coords must be even to avoid flipping U/V when we later paste):
241 int const rx = std::min(int(inimg.width) - roihw, std::max(roihw, dmx));
242 int const ry = std::min(int(inimg.height) - roihw, std::max(roihw, dmy));
243
244 // Asynchronously launch a bunch of saliency drawings and filter the attended locations
245 auto draw_fut =
246 jevois::async([&]() {
247 // Paste the various saliency results:
248 drawMap(outimg, &itsSaliency->salmap, 320, 0, 16, 20);
249 jevois::rawimage::writeText(outimg, "Saliency Map", 640 - 12*6-4, 3, txtcol);
250
251 drawMap(outimg, &itsSaliency->color, 0, 240, 4, 18);
252 jevois::rawimage::writeText(outimg, "Color", 3, 243, txtcol);
253
254 drawMap(outimg, &itsSaliency->intens, 80, 240, 4, 18);
255 jevois::rawimage::writeText(outimg, "Intensity", 83, 243, txtcol);
256
257 drawMap(outimg, &itsSaliency->ori, 160, 240, 4, 18);
258 jevois::rawimage::writeText(outimg, "Orientation", 163, 243, txtcol);
259
260 drawMap(outimg, &itsSaliency->flicker, 240, 240, 4, 18);
261 jevois::rawimage::writeText(outimg, "Flicker", 243, 243, txtcol);
262
263 drawMap(outimg, &itsSaliency->motion, 320, 240, 4, 18);
264 jevois::rawimage::writeText(outimg, "Motion", 323, 243, txtcol);
265
266 // Draw the gist vector:
267 drawGist(outimg, itsSaliency->gist, itsSaliency->gist_size, 400, 242, 40, 2);
268
269 // Draw a small square at most salient location in image and in saliency map:
270 jevois::rawimage::drawFilledRect(outimg, mx * 16 + 5, my * 16 + 5, 8, 8, 0xffff);
271 jevois::rawimage::drawFilledRect(outimg, 320 + mx * 16 + 5, my * 16 + 5, 8, 8, 0xffff);
272 jevois::rawimage::drawRect(outimg, rx - roihw, ry - roihw, roihw*2, roihw*2, 0xf0f0);
273 jevois::rawimage::drawRect(outimg, rx - roihw+1, ry - roihw+1, roihw*2-2, roihw*2-2, 0xf0f0);
274
275 // Blank out free space from 480 to 519 at the bottom, and small space above and below gist vector:
276 jevois::rawimage::drawFilledRect(outimg, 480, 240, 40, 60, 0x8000);
277 jevois::rawimage::drawRect(outimg, 400, 240, 80, 2, 0x80a0);
278 jevois::rawimage::drawRect(outimg, 400, 298, 80, 2, 0x80a0);
280
281 // If intro mode, blank out rows 312 to bottom:
282 if (outimg.height == 480)
283 {
284 jevois::rawimage::drawFilledRect(outimg, 0, 312, outimg.width, 48, 0x8000);
285 jevois::rawimage::paste(itsBanner, outimg, 0, 360);
286 }
287 else if (outimg.height > 312)
288 jevois::rawimage::drawFilledRect(outimg, 0, 312, outimg.width, outimg.height - 312, 0x8000);
289
290 // Filter the attended locations:
291 itsKF->set(dmx, dmy, inimg.width, inimg.height);
292 float kfxraw, kfyraw, kfximg, kfyimg;
293 itsKF->get(kfxraw, kfyraw, kfximg, kfyimg, inimg.width, inimg.height, 1.0F, 1.0F);
294
295 // Draw a circle around the kalman-filtered attended location:
296 jevois::rawimage::drawCircle(outimg, int(kfximg), int(kfyimg), 20, 1, jevois::yuyv::LightGreen);
297
298 // Send saliency info to serial port (for arduino, etc):
299 sendSerialImg2D(inimg.width, inimg.height, kfximg, kfyimg, roihw * 2, roihw * 2, "salient");
300
301 // If intro mode, draw some text messages according to our script:
302 if (intromode && intromoviedone)
303 {
304 // Compute fade: we do 1s fade in, 2s full luminance, 1s fade out:
305 int lum = 255;
306 if (scriptframe < 32) lum = scriptframe * 8;
307 else if (scriptframe > 4*30 - 32) lum = std::max(0, (4*30 - scriptframe) * 8);
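 // For example, at the ~30 fps implied by the 4*30 term: frames 0-31 ramp lum from 0 up to 248 (~1 s fade in),
 // frames 32-88 keep the full 255 (~2 s), frames 89-119 ramp back down (~1 s fade out), and frames 120-139 stay
 // at 0 until the script advances at frame 140 below.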
308
309 // Display the text with the proper fade:
310 int x = (640 - 10 * strlen(scriptitem->msg)) / 2;
311 jevois::rawimage::writeText(outimg, scriptitem->msg, x, 325, 0x7700 | lum, jevois::rawimage::Font10x20);
312
313 // Add a blinking marker if specified in the script:
314 if (scriptitem->blinkx)
315 {
316 int phase = scriptframe / 10;
317 if ((phase % 2) == 0) jevois::rawimage::drawDisk(outimg, scriptitem->blinkx, scriptitem->blinky,
318 10, jevois::yuyv::LightTeal);
319 }
320
321 // Move to next video frame and possibly next script item or loop the script:
322 if (++scriptframe >= 140)
323 {
324 scriptframe = 0; ++scriptitem;
325 if (scriptitem->msg == nullptr) scriptitem = &TheScript[0];
326 }
327 }
328 });
329
330 // Extract a raw YUYV ROI around attended point:
331 cv::Mat rawimgcv = jevois::rawimage::cvImage(inimg);
332 cv::Mat rawroi = rawimgcv(cv::Rect(rx - roihw, ry - roihw, roihw * 2, roihw * 2));
333
334 if (doobject)
335 {
336 // #################### Object recognition:
337
338 // Prepare a color or grayscale ROI for the object recognition module:
339 auto objsz = itsObjectRecognition->insize();
340 cv::Mat objroi;
341 switch (objsz.depth_)
342 {
343 case 1: // grayscale input
344 {
345 // MNIST digits are white on a black background, while we assume the user draws black digits on white paper,
346 // so invert the image before sending it for recognition. We also need to provide a clean crop around the digit
347 // for the deep network to work well:
348 cv::cvtColor(rawroi, objroi, cv::COLOR_YUV2GRAY_YUYV);
349
350 // Find the 10th percentile gray value:
351 size_t const elem = (objroi.cols * objroi.rows * 10) / 100;
352 std::vector<unsigned char> v; v.assign(objroi.datastart, objroi.dataend);
353 std::nth_element(v.begin(), v.begin() + elem, v.end());
354 unsigned char const thresh = std::min((unsigned char)(100), std::max((unsigned char)(30), v[elem]));
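 // For example, the 64x64 ROI has 4096 pixels, so elem = 409 and v[elem] is roughly the 10th-percentile gray
 // value; if that value is 55 the threshold is 55, while anything outside [30, 100] gets clamped to that range.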
355
356 // Threshold and invert the image:
357 cv::threshold(objroi, objroi, thresh, 255, cv::THRESH_BINARY_INV);
358
359 // Find the digit and center and crop it:
360 cv::Mat pts; cv::findNonZero(objroi, pts);
361 cv::Rect r = cv::boundingRect(pts);
362 int const cx = r.x + r.width / 2;
363 int const cy = r.y + r.height / 2;
364 int const siz = std::min(roihw * 2, std::max(16, 8 + std::max(r.width, r.height))); // margin of 4 pix
365 int const tlx = std::max(0, std::min(roihw*2 - siz, cx - siz/2));
366 int const tly = std::max(0, std::min(roihw*2 - siz, cy - siz/2));
367 cv::Rect ar(tlx, tly, siz, siz);
368 cv::resize(objroi(ar), objroi, cv::Size(objsz.width_, objsz.height_), 0, 0, cv::INTER_AREA);
369 //cv::imshow("cropped roi", objroi);cv::waitKey(1);
370 }
371 break;
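 // For example, a 22x30 bounding box around the digit gives siz = min(64, max(16, 8 + 30)) = 38, i.e. a 38x38
 // crop with a few pixels of margin, shifted if necessary to stay inside the 64x64 ROI, then resized to the
 // network's input size.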
372
373 case 3: // color input
374 cv::cvtColor(rawroi, objroi, cv::COLOR_YUV2RGB_YUYV);
375 cv::resize(objroi, objroi, cv::Size(objsz.width_, objsz.height_), 0, 0, cv::INTER_AREA);
376 break;
377
378 default:
379 LFATAL("Unsupported object detection input depth " << objsz.depth_);
380 }
381
382 // Launch object recognition on the ROI and get the recognition scores:
383 auto scores = itsObjectRecognition->process(objroi);
384
385 // Create a string to show all scores:
386 std::ostringstream oss;
387 for (size_t i = 0; i < scores.size(); ++i)
388 oss << itsObjectRecognition->category(i) << ':' << std::fixed << std::setprecision(2) << scores[i] << ' ';
389 itsScoresStr = oss.str();
390
391 // Check whether the highest score is very high and significantly higher than the second best:
392 float best1 = scores[0], best2 = scores[0]; size_t idx1 = 0, idx2 = 0;
393 for (size_t i = 1; i < scores.size(); ++i)
394 {
395 if (scores[i] > best1) { best2 = best1; idx2 = idx1; best1 = scores[i]; idx1 = i; }
396 else if (scores[i] > best2) { best2 = scores[i]; idx2 = i; }
397 }
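 // Given the 90/20 thresholds below, scores are treated as percentages: e.g. best1 = 96.8 with best2 = 1.2 counts
 // as a clean recognition and updates the display, while best1 = 96.8 with best2 = 35.0 does not.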
398
399 // Update our display upon each "clean" recognition:
400 if (best1 > 90.0F && best2 < 20.0F)
401 {
402 // Remember this recognized object for future displays:
403 itsLastObjectCateg = itsObjectRecognition->category(idx1);
404 itsLastObject = rawimgcv(cv::Rect(rx - 30, ry - 30, 60, 60)).clone(); // make a deep copy
405
406 LINFO("Object recognition: best: " << itsLastObjectCateg <<" (" << best1 <<
407 "), second best: " << itsObjectRecognition->category(idx2) << " (" << best2 << ')');
408 }
409 }
410 else
411 {
412 // #################### Face detection:
413
414 // Prepare a grey ROI from our raw YUYV roi:
415 cv::Mat grayroi; cv::cvtColor(rawroi, grayroi, cv::COLOR_YUV2GRAY_YUYV);
416 cv::equalizeHist(grayroi, grayroi);
417
418 // Launch the face detector:
419 std::vector<cv::Rect> faces; std::vector<std::vector<cv::Rect> > eyes;
420 itsFaceDetector->process(grayroi, faces, eyes, false);
421
422 // Draw the faces and eyes, if any:
423 if (faces.size())
424 {
425 LINFO("detected " << faces.size() << " faces");
426 // Store the attended ROI into our last ROI, fixed size 60x60 for our display:
427 itsLastFace = rawimgcv(cv::Rect(rx - 30, ry - 30, 60, 60)).clone(); // make a deep copy
428 }
429
430 for (size_t i = 0; i < faces.size(); ++i)
431 {
432 // Draw one face:
433 cv::Rect const & f = faces[i];
434 jevois::rawimage::drawRect(outimg, f.x + rx - roihw, f.y + ry - roihw, f.width, f.height, 0xc0ff);
435
436 // Draw the corresponding eyes:
437 for (auto const & e : eyes[i])
438 jevois::rawimage::drawRect(outimg, e.x + rx - roihw, e.y + ry - roihw, e.width, e.height, 0x40ff);
439 }
440 }
441
442 // Let camera know we are done processing the raw YUV input image. NOTE: rawroi is now invalid:
443 inframe.done();
444
445 // Paste our last attended and recognized face and object (or empty pics):
446 cv::Mat outimgcv(outimg.height, outimg.width, CV_8UC2, outimg.buf->data());
447 itsLastObject.copyTo(outimgcv(cv::Rect(520, 240, 60, 60)));
448 itsLastFace.copyTo(outimgcv(cv::Rect(580, 240, 60, 60)));
449
450 // Wait until all saliency drawings are complete (since they blank out our object label area):
451 draw_fut.get();
452
453 // Print all object scores:
454 jevois::rawimage::writeText(outimg, itsScoresStr, 2, 301, txtcol);
455
456 // Write any positively recognized object category:
457 jevois::rawimage::writeText(outimg, itsLastObjectCateg.c_str(), 517-6*itsLastObjectCateg.length(), 263, txtcol);
458
459 // FIXME do svm on gist and write results here
460
461 // Show processing fps:
462 std::string const & fpscpu = itsProcessingTimer.stop() + ", v" JEVOIS_VERSION_STRING;
463 jevois::rawimage::writeText(outimg, fpscpu, 3, 240 - 13, jevois::yuyv::White);
464
465 // If on a mac with height = 480, need to flip horizontally for photobooth to work (it will flip again):
466 if (outimg.height == 480) jevois::rawimage::hFlipYUYV(outimg);
467
468 // Send the output image with our processing results to the host over USB:
469 outframe.send();
470
471 // Alternate between face and object recognition:
472 doobject = ! doobject;
473 }
474
475 protected:
476 std::shared_ptr<Saliency> itsSaliency;
477 std::shared_ptr<FaceDetector> itsFaceDetector;
478 std::shared_ptr<ObjectRecognitionBase> itsObjectRecognition;
479 std::shared_ptr<Kalman2D> itsKF;
480 std::shared_ptr<BufferedVideoReader> itsVideo;
481 jevois::RawImage itsBanner;
482 std::string itsScoresStr;
483};
484
485// Allow the module to be loaded as a shared object (.so) file:
486JEVOIS_REGISTER_MODULE(JeVoisIntro);
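// The @videomapping lines in the documentation block above correspond to entries of roughly this form in
// JEVOIS:/config/videomappings.cfg (a sketch, assuming the usual mapping syntax of USB output format/size/fps
// followed by camera format/size/fps, vendor, and module name):
//
//   YUYV 640 360 50.0 YUYV 320 240 50.0 JeVois JeVoisIntro
//   YUYV 640 480 50.0 YUYV 320 240 50.0 JeVois JeVoisIntro
//
// Selecting one of these USB output modes (e.g. YUYV 640x480 @ 50 fps) in a video capture application is what
// launches this module on the camera.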