JeVois 1.22
JeVois Smart Embedded Machine Vision Toolkit
Network.C
1// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2//
3// JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2021 by Laurent Itti, the University of Southern
4// California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
5//
6// This file is part of the JeVois Smart Embedded Machine Vision Toolkit. This program is free software; you can
7// redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
8// Foundation, version 2. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10// License for more details. You should have received a copy of the GNU General Public License along with this program;
11// if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
12//
13// Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
14// Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
15// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
16/*! \file */
17
18#include <jevois/DNN/Network.H>
19#include <jevois/DNN/Utils.H>
20#include <jevois/Util/Async.H>
21#include <jevois/Debug/Timer.H>
22#include <jevois/DNN/NetworkPython.H>
23
24// Special output tensor number that means apply transform to all output tensors:
25#define ALL_TENSORS 12345678
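// (A literal '*' in an op's tensor-number slot, e.g. transpose(*, 0, 2, 1), is parsed below into
// this special value so that the op applies to every output tensor.)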
26
27// ####################################################################################################
28jevois::dnn::Network::~Network()
29{ }
30
31// ####################################################################################################
32void jevois::dnn::Network::freeze(bool doit)
33{
34 comment::freeze(doit);
35 url::freeze(doit);
36 extraintensors::freeze(doit);
37}
38
39// ####################################################################################################
40void jevois::dnn::Network::onParamChange(network::outtransform const &, std::string const & val)
41{
42 itsOps.clear();
43 if (val.empty()) return;
44
45 // Split sequence by semi-colon:
46 std::vector<std::string> ops = jevois::split(val, "\\s*;\\s*");
47
48 // Decode each operation as op(arg1, arg2, ...):
49 for (std::string const & op : ops)
50 {
51 Oper o; bool syntax_error = true;
52 std::vector<std::string> tok = jevois::split(op, "(\\s*\\(\\s*|\\s*,\\s*|\\s*\\)\\s*)");
53 //LINFO("op=["<<op<<"] and tok="<<jevois::join(tok, "/"));
54
55 // ----------------------------------------------------------------------------------------------------
56 if (tok[0] == "shape")
57 {
58 if (tok.size() == 3)
59 try
60 {
61 o.op = Operator::Shape;
62 o.tnum.emplace_back(std::stoul(tok[1]));
63 std::vector<size_t> const newshape = jevois::dnn::strshape(tok[2]);
64 for (size_t v : newshape) o.newvals.emplace_back(int(v));
65 syntax_error = false;
66 } catch (...) { }
67
68 if (syntax_error) LFATAL("Syntax error, expected: shape(outnum, AxBxC...)");
69 }
70 // ----------------------------------------------------------------------------------------------------
71 else if (tok[0] == "transpose")
72 {
73 if (tok.size() >= 3)
74 try
75 {
76 o.op = Operator::Transpose;
77 if (tok[1] == "*") o.tnum.emplace_back(ALL_TENSORS); // Special tensor number * means all of them
78 else o.tnum.emplace_back(std::stoul(tok[1]));
79 for (size_t i = 2; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
80 syntax_error = false;
81 } catch (...) { }
82
83 if (syntax_error) LFATAL("Syntax error, expected: transpose(outnum, oldaxisA, oldaxisB, ...), where "
84 "transposed new axis 0 (the outermost dimension, typically batch size) will be "
85 "from oldaxisA, new axis 1 from oldaxisB, etc");
86 }
87 // ----------------------------------------------------------------------------------------------------
88 else if (tok[0] == "order")
89 {
90 if (tok.size() >= 3)
91 try
92 {
93 o.op = Operator::Order;
94 for (size_t i = 1; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
95 syntax_error = false;
96 } catch (...) { }
97
98 if (syntax_error) LFATAL("Syntax error, expected: order(oldidx0, oldidx1, ...), where the new order will be "
99 "new tensor 0: old tensor oldidx0 (which is zero-based); new tensor 1: "
100 "old tensor oldidx1, etc. It is ok to have duplicated or missing entries.");
101 }
102 // ----------------------------------------------------------------------------------------------------
103 else if (tok[0] == "split")
104 {
105 if (tok.size() >= 4)
106 try
107 {
108 o.op = Operator::Split;
109 if (tok[1] == "*") o.tnum.emplace_back(ALL_TENSORS); // Special tensor number * means all of them
110 else o.tnum.emplace_back(std::stoul(tok[1]));
111 o.tnum.emplace_back(std::stoul(tok[2])); // axis number
112 for (size_t i = 3; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
113 syntax_error = false;
114 } catch (...) { }
115
116 if (syntax_error) LFATAL("Syntax error, expected: split(outnum, axis, newsize1, ..., newsizeN), where "
117 "axis 0 is the outtermost dimension (typically, batch size), and newsize1 + ... "
118 "+ newsizeN must be equal to the original size of that axis.");
119 }
120 // ----------------------------------------------------------------------------------------------------
121 else if (tok[0] == "merge")
122 {
123 if (tok.size() >= 3)
124 try
125 {
126 o.op = Operator::Merge;
127 o.tnum.emplace_back(std::stoul(tok[1])); // axis number
128 for (size_t i = 2; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
129 syntax_error = false;
130
131 // Check that tensors to merge are listed in ascending order:
132 for (size_t i = 0; i < o.newvals.size() - 1; ++i)
133 if (o.newvals[i] > o.newvals[i+1]) { syntax_error = true; break; }
134 } catch (...) { }
135
136 if (syntax_error) LFATAL("Syntax error, expected: merge(axis, outnum1, ..., outnumN), where "
137 "axis 0 is the outermost dimension (typically, batch size) and outnum1, ..., "
138 "outnumN are the outputs to merge along that axis. All the outputs to be merged "
139 "must have matching number of dimensions, and matching sizes on all other axes. "
140 "The merged tensor will replace the first output listed in the merge, and the other "
141 "listed will be removed. Outputs to merge must be listed in ascending order (use "
142 "an order() transform first if needed)");
143 }
144 // ----------------------------------------------------------------------------------------------------
145 else LFATAL("Syntax error: Unrecognized operation: " << op);
146
147 itsOps.emplace_back(o);
148 }
149}
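// Hypothetical example of an outtransform parameter value chaining three ops with semicolons
// (assuming all outputs are 3D after the first op):
//   shape(0, 1x25200x85); transpose(*, 0, 2, 1); order(1, 0)
// This would reshape output 0, then swap the last two axes of every output, then exchange the
// first two outputs.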
150
151// ####################################################################################################
152void jevois::dnn::Network::waitBeforeDestroy()
153{
154 // Do not destroy a network that is loading, and do not throw...
155 size_t count = 0;
156 while (itsLoading.load())
157 {
158 std::this_thread::sleep_for(std::chrono::milliseconds(5));
159 try { if (ready()) break; } catch (...) { }
160 if (count++ == 200) { LINFO("Waiting for network load to complete..."); count = 0; }
161 }
162}
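// (Derived classes would typically call this at the start of their destructors, so that an
// in-flight asynchronous load() in the thread pool cannot touch a partially-destroyed object.)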
163
164// ####################################################################################################
165bool jevois::dnn::Network::ready()
166{
167 // If we are loaded, we are ready to process:
168 if (itsLoaded.load()) return true;
169
170 // If we are loading, check whether loading is complete or threw, otherwise return false as we keep loading:
171 if (itsLoading.load())
172 {
173 if (itsLoadFut.valid() && itsLoadFut.wait_for(std::chrono::milliseconds(2)) == std::future_status::ready)
174 {
175 try { itsLoadFut.get(); itsLoaded.store(true); itsLoading.store(false); LINFO("Network loaded."); return true; }
176 catch (...) { itsLoading.store(false); jevois::warnAndRethrowException(); }
177 }
178 return false;
179 }
180
181 // For Python networks, we need to load in the current thread and block everyone...
182 if (dynamic_cast<jevois::dnn::NetworkPython *>(this) != nullptr)
183 {
184 LINFO("Loading network...");
185 this->load();
186 itsLoaded.store(true);
187 itsLoading.store(false);
188 LINFO("Network loaded.");
189 return true;
190 }
191
192 // Otherwise, trigger an async load:
193 itsLoading.store(true);
194 itsLoadFut = jevois::async(std::bind(&jevois::dnn::Network::load, this));
195 LINFO("Loading network...");
196
197 return false;
198}
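// Hypothetical usage sketch: a caller would typically poll ready() once per frame and skip
// inference until the asynchronous load has completed, e.g.:
//   if (net->ready()) outs = net->process(blobs, info);
//   else { /* show a "loading..." message and try again next frame */ }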
199
200// ####################################################################################################
201std::vector<cv::Mat> jevois::dnn::Network::process(std::vector<cv::Mat> const & blobs,
202 std::vector<std::string> & info)
203{
204 if (ready() == false) LFATAL("Network is not ready");
205 static jevois::TimerOne eitimer("Create extra inputs");
206 static jevois::TimerOne tftimer("Transform outputs");
207
208 std::vector<cv::Mat> outs;
209 std::string const c = comment::get();
210
211 // Add any extra input tensors?
212 std::string const extra = extraintensors::get();
213 if (extra.empty() == false)
214 {
215 eitimer.start();
216
217 std::vector<cv::Mat> newblobs = blobs;
218
219 std::vector<std::string> ins = jevois::split(extra, ",\\s*");
220 for (std::string const & in : ins)
221 {
222 vsi_nn_tensor_attr_t attr; memset(&attr, 0, sizeof(attr));
223
224 std::vector<std::string> tok = jevois::split(in, ":");
225 if (tok.size() != 3)
226 LFATAL("Malformed extra tensor, need <type>:<shape>:val1 val2 ... valN (separate multiple tensors by comma)");
227
228 // Decode type and convert to vsi, only those types that OpenCV can support:
229 if (tok[0] == "8U") attr.dtype.vx_type = VSI_NN_TYPE_UINT8;
230 else if (tok[0] == "8S") attr.dtype.vx_type = VSI_NN_TYPE_INT8;
231 else if (tok[0] == "16U") attr.dtype.vx_type = VSI_NN_TYPE_UINT16;
232 else if (tok[0] == "16S") attr.dtype.vx_type = VSI_NN_TYPE_INT16;
233 else if (tok[0] == "16F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT16;
234 else if (tok[0] == "32S") attr.dtype.vx_type = VSI_NN_TYPE_INT32;
235 else if (tok[0] == "32F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT32;
236 else if (tok[0] == "64F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT64;
237 else throw std::range_error("Unsupported extra input tensor type [" + tok[0] + "] in " + extra);
238
239 // Decode the dims:
240 std::vector<size_t> dims = jevois::dnn::strshape(tok[1]);
241 attr.dim_num = dims.size();
242 for (size_t i = 0; i < attr.dim_num; ++i) attr.size[attr.dim_num - 1 - i] = dims[i];
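// Note: strshape() returns dims outermost-first (AxBxC...), and the loop above stores them in
// reverse, so attr.size[0] receives the innermost (last-listed) dimension, as vsi_nn tensor
// attributes apparently list sizes innermost-first.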
243
244 // Allocate the tensor:
245 attr.dtype.qnt_type = VSI_NN_QNT_TYPE_NONE;
246 attr.dtype.fmt = VSI_NN_DIM_FMT_AUTO;
247 cv::Mat b = jevois::dnn::attrmat(attr);
248
249 // Populate the values:
250 std::vector<std::string> vals = jevois::split(tok[2], "\\s+");
251 size_t const nvals = vals.size();
252 if (nvals != b.total())
253 LFATAL("Extra in tensor needs " << b.total() << " values, but " << nvals << " given in [" << in << ']');
254 switch (attr.dtype.vx_type)
255 {
256 case VSI_NN_TYPE_UINT8:
257 {
258 uint8_t * ptr = reinterpret_cast<uint8_t *>(b.data);
259 for (std::string const & v : vals) *ptr++ = std::stoi(v);
260 }
261 break;
262
263 case VSI_NN_TYPE_INT8:
264 {
265 int8_t * ptr = reinterpret_cast<int8_t *>(b.data);
266 for (std::string const & v : vals) *ptr++ = std::stoi(v);
267 }
268 break;
269
270 case VSI_NN_TYPE_UINT16:
271 {
272 uint16_t * ptr = reinterpret_cast<uint16_t *>(b.data);
273 for (std::string const & v : vals) *ptr++ = std::stoi(v);
274 }
275 break;
276
277 case VSI_NN_TYPE_INT16:
278 {
279 int16_t * ptr = reinterpret_cast<int16_t *>(b.data);
280 for (std::string const & v : vals) *ptr++ = std::stoi(v);
281 }
282 break;
283
284 case VSI_NN_TYPE_FLOAT16:
285 {
286 cv::hfloat * ptr = reinterpret_cast<cv::hfloat *>(b.data);
287 for (std::string const & v : vals) *ptr++ = cv::hfloat(std::stof(v));
288 }
289 break;
290
291 case VSI_NN_TYPE_INT32:
292 {
293 int32_t * ptr = reinterpret_cast<int32_t *>(b.data);
294 for (std::string const & v : vals) *ptr++ = std::stoi(v);
295 }
296 break;
297
298 case VSI_NN_TYPE_FLOAT32:
299 {
300 float * ptr = reinterpret_cast<float *>(b.data);
301 for (std::string const & v : vals) *ptr++ = std::stof(v);
302 }
303 break;
304
305 case VSI_NN_TYPE_FLOAT64:
306 {
307 double * ptr = reinterpret_cast<double *>(b.data);
308 for (std::string const & v : vals) *ptr++ = std::stod(v);
309 }
310 break;
311
312 default: LFATAL("Internal inconsistency");
313 }
314
315 newblobs.emplace_back(std::move(b));
316 }
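// Hypothetical example value for extraintensors, declaring a single 1x4 float tensor:
//   32F:1x4:0.1 0.2 0.3 0.4
// Multiple extra tensors would be separated by commas, per the parsing above.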
317
318 // NOTE: Keep the code below in sync with the default case (no extra inputs). Both branches are duplicated to avoid
319 // having to make a copy of blobs into newblobs in the standard case when we do not have any extra inputs:
320
321 // Show info about input tensors:
322 info.emplace_back("* Input Tensors");
323 for (cv::Mat const & b : newblobs) info.emplace_back("- " + jevois::dnn::shapestr(b));
324 info.emplace_back(eitimer.stop());
325
326 // Run processing on the derived class:
327 info.emplace_back("* Network");
328 if (c.empty() == false) info.emplace_back(c);
329
330 outs = doprocess(newblobs, info);
331 }
332 else
333 {
334 // Show info about input tensors:
335 info.emplace_back("* Input Tensors");
336 for (cv::Mat const & b : blobs) info.emplace_back("- " + jevois::dnn::shapestr(b));
337
338 // Run processing on the derived class:
339 info.emplace_back("* Network");
340 if (c.empty() == false) info.emplace_back(c);
341
342 outs = doprocess(blobs, info);
343 }
344
345 // Show info about output tensors:
346 info.emplace_back("* Output Tensors");
347 for (size_t i = 0; i < outs.size(); ++i) info.emplace_back("- " + jevois::dnn::shapestr(outs[i]));
348
349 // Possibly apply some transformation sequence to the outputs:
350 if (itsOps.empty() == false)
351 {
352 tftimer.start();
353
354 info.emplace_back("* Output Tensors Transforms");
355
356 for (Oper const & o : itsOps)
357 switch(o.op)
358 {
359 // ----------------------------------------------------------------------------------------------------
360 case Operator::Shape:
361 {
362 // tnum guaranteed to have 1 entry; newvals has a valid nD shape.
363 size_t const tnum = o.tnum[0];
364
365 try
366 {
367 outs[tnum] = outs[tnum].reshape(1, o.newvals);
368 info.emplace_back("- shape out " + std::to_string(tnum) + " to " + jevois::dnn::shapestr(outs[tnum]));
369 }
370 catch (...)
371 {
372 LFATAL("While attempting output transform 'shape(" << tnum << ", " <<
373 jevois::dnn::shapestr(o.newvals, outs[tnum].type()) << ")': Cannot reshape from " <<
374 jevois::dnn::shapestr(outs[tnum]) << " to desired dims because of total number of elements mismatch");
375 }
376 }
377 break;
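// Hypothetical example: shape(0, 1x8400x85) would reshape output 0 to 1x8400x85, provided the
// total element count is unchanged.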
378
379 // ----------------------------------------------------------------------------------------------------
380 case Operator::Transpose:
381 {
382 // tnum guaranteed to have 1 entry; newvals has a list of axis numbers
383 size_t tnum = o.tnum[0];
384
385 // Possibly parallelize if more than one transpose to do:
386 if (tnum == ALL_TENSORS)
387 {
388 std::vector<std::future<void>> fvec;
389 for (size_t t = 0; t < outs.size(); ++t)
390 {
391 info.emplace_back("- transpose out " + std::to_string(t) + " from " + jevois::dnn::shapestr(outs[t]));
392 fvec.emplace_back(jevois::async([&](size_t t)
393 {
394 try
395 {
396 // Do the transpose. cv::transposeND() needs separate source and dest tensors:
397 cv::Mat newout; cv::transposeND(outs[t], o.newvals, newout); outs[t] = std::move(newout);
398 }
399 catch (...)
400 {
401 LFATAL("While attempting output transform 'transpose(" << t << ", " << jevois::join(o.newvals, ", ") <<
402 ")': Cannot transpose from " << jevois::dnn::shapestr(outs[t]) <<
403 " to desired shape, check number of dimensions and that the desired axes contain every "
404 "source axis number exactly once.");
405 }
406 }, t));
407 }
408
409 // Use joinall() to get() all futures and throw a single consolidated exception if any thread threw:
410 jevois::joinall(fvec);
411 }
412 else
413 {
414 // Only one tensor to transpose:
415 try
416 {
417 // Do the transpose. cv::transposeND() needs separate source and dest tensors:
418 cv::Mat newout; cv::transposeND(outs[tnum], o.newvals, newout); outs[tnum] = std::move(newout);
419 info.emplace_back("- transpose out " + std::to_string(tnum) + " to " + jevois::dnn::shapestr(outs[tnum]));
420 }
421 catch (...)
422 {
423 LFATAL("While attempting output transform 'transpose(" << tnum << ", " << jevois::join(o.newvals, ", ") <<
424 ")': Cannot transpose from " << jevois::dnn::shapestr(outs[tnum]) <<
425 " to desired shape, check number of dimensions and that the desired axes contain every source axis "
426 "number exactly once.");
427 }
428 }
429 }
430 break;
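// Hypothetical example: transpose(0, 0, 2, 3, 1) would turn a 1x85x52x52 output 0 into
// 1x52x52x85; transpose(*, 0, 2, 3, 1) would apply the same permutation to every output.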
431
432 // ----------------------------------------------------------------------------------------------------
433 case Operator::Order:
434 {
435 std::vector<cv::Mat> newouts; int const osiz = int(outs.size());
436
437 for (int idx : o.newvals)
438 if (idx >= 0 && idx < osiz)
439 newouts.push_back(outs[idx]); // no emplace_back() here as one tensor may be pushed several times
440 else
441 LFATAL("While attempting output transform 'order(" << jevois::join(o.newvals, ", ") <<
442 ")': Output number " << idx << " does not exist");
443
444 info.emplace_back("- order: " + jevois::join(o.newvals, ", "));
445 outs = std::move(newouts);
446 }
447 break;
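// Hypothetical example: with three outputs, order(2, 0, 1) would make old output 2 the new
// output 0; order(0, 0) would keep two copies of output 0 and drop the others.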
448
449 // ----------------------------------------------------------------------------------------------------
450 case Operator::Split:
451 {
452 size_t const tnum = o.tnum[0];
453 size_t const axis = o.tnum[1];
454
455 std::vector<cv::Mat> newouts;
456 for (size_t i = 0; i < outs.size(); ++i)
457 if (i == tnum || tnum == ALL_TENSORS)
458 {
459 // Split that tensor and push the resulting tensors. split() will check validity of axis, sizes, etc:
460 try
461 {
462 std::vector<cv::Mat> mats = jevois::dnn::split(outs[i], axis, o.newvals);
463
464 // Add those mats, create info string:
465 std::string inf = "- split out " + std::to_string(i) + " to ";
466 for (cv::Mat & m : mats)
467 {
468 inf += jevois::dnn::shapestr(m) + ", ";
469 newouts.emplace_back(m);
470 }
471 info.emplace_back(inf.substr(0, inf.length() - 2));
472 }
473 catch (std::exception const & e)
474 {
475 LFATAL("While attempting output transform 'split(" << i << ", " << axis << ", " <<
476 jevois::join(o.newvals, ", ") << ")': error: " << e.what());
477 }
478 catch (...)
479 {
480 LFATAL("While attempting output transform 'split(" << i << ", " << axis << ", " <<
481 jevois::join(o.newvals, ", ") << ")': unknown error");
482 }
483 }
484 else newouts.push_back(outs[i]); // Just transfer that tensor
485
486 outs = std::move(newouts);
487 }
488 break;
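// Hypothetical example: split(0, 2, 64, 21) would split a 1x8400x85 output 0 along its last
// axis into 1x8400x64 and 1x8400x21, which replace output 0 in the outputs list.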
489
490 // ----------------------------------------------------------------------------------------------------
491 case Operator::Merge:
492 {
493 size_t const axis = o.tnum[0]; size_t const numouts = outs.size();
494 std::vector<cv::Mat> newouts;
495
496 // Decide what to do for each output: 0=copy over, 1=run the merge and store result, 2=skip other merge parts.
497 // Also build a vector of those tensors to merge:
498 std::vector<int> action(numouts); bool didmerge = false; std::vector<cv::Mat> tomerge;
499 for (int i = 0; i < int(numouts); ++i)
500 {
501 if (outs[i].type() != CV_32F && outs[i].type() != CV_64F && outs[i].type() != CV_16F)
502 LFATAL("While attempting output transform 'merge(" << axis << ", " << jevois::join(o.newvals, ", ") <<
503 ")': Cannot merge quantized tensors");
504
505 // Check if i is in our merge list:
506 bool inmerge = false; for (int j : o.newvals) if (i == j) { inmerge = true; break; }
507 if (inmerge)
508 {
509 tomerge.push_back(outs[i]); // no emplace_back() here as we might want to duplicate a tensor
510 if (didmerge == false) { action[i] = 1; didmerge = true; } else action[i] = 2;
511 }
512 else action[i] = 0;
513 }
514
515 // Ready to rock:
516 for (size_t i = 0; i < numouts; ++i)
517 try
518 {
519 switch (action[i])
520 {
521 case 0: // push that tensor unmodified
522 newouts.push_back(outs[i]);
523 break;
524
525 case 1: // push the merged tensor
526 newouts.emplace_back(jevois::dnn::concatenate(tomerge, axis));
527 info.emplace_back("- merged outs " + jevois::join(o.newvals, ", ") + " into " +
528 jevois::dnn::shapestr(newouts.back()) + " (new out " +
529 std::to_string(newouts.size()-1) + ')');
530 break;
531
532 case 2: // skip the other tensors that were merged
533 break;
534
535 default: LFATAL("Internal inconsistency in merge() transform");
536 }
537 }
538 catch (std::exception const & e)
539 {
540 LFATAL("While attempting output transform 'merge(" << axis << ", " <<
541 jevois::join(o.newvals, ", ") << ")': error: " << e.what());
542 }
543 catch (...)
544 {
545 LFATAL("While attempting output transform 'merge(" << axis << ", " <<
546 jevois::join(o.newvals, ", ") << ")': unknown error");
547 }
548
549 outs = std::move(newouts);
550 }
551 break;
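// Hypothetical example: merge(1, 0, 1, 2) would concatenate float outputs 0, 1, 2 along axis 1,
// e.g., three 1x2100x85 tensors into one 1x6300x85 tensor that replaces output 0.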
552
553 // ----------------------------------------------------------------------------------------------------
554 default:
555 LFATAL("Internal error: Unsupported output transform op " << int(o.op));
556 }
557
558 info.emplace_back(tftimer.stop());
559
560 // Show info about transformed output tensors:
561 info.emplace_back("* Transformed Output Tensors");
562 for (size_t i = 0; i < outs.size(); ++i) info.emplace_back("- " + jevois::dnn::shapestr(outs[i]));
563 }
564
565 return outs;
566}
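A minimal usage sketch (hypothetical: the helper below is not part of JeVois; it only assumes the
jevois::dnn::Network interface shown above and the LINFO macro from jevois/Debug/Log.H). It polls
ready() so the asynchronous load can complete, then runs one inference pass and logs the per-stage
info strings that process() fills in:

#include <jevois/DNN/Network.H>
#include <jevois/Debug/Log.H>

// Run one inference pass if the network has finished loading; return empty otherwise.
std::vector<cv::Mat> runIfReady(jevois::dnn::Network & net, std::vector<cv::Mat> const & blobs)
{
  std::vector<std::string> info;                         // human-readable shapes, timings, transforms
  if (net.ready() == false) return {};                   // still loading asynchronously
  std::vector<cv::Mat> outs = net.process(blobs, info);  // pre-processed blobs in, raw tensors out
  for (std::string const & s : info) LINFO(s);           // log what happened at each stage
  return outs;
}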
#define ALL_TENSORS
Definition Network.C:25
Simple one-shot timer class.
Definition Timer.H:72
std::string stop(double *seconds)
End a time measurement period, report time spent as: 'prefix: %ms (%fps)' where % is replaced by values...
Definition Timer.C:162
void start()
Start a time measurement period.
Definition Timer.C:156
Wrapper around a DNN neural network invoked through Python.
virtual void load()=0
Load from disk.
bool ready()
Returns true when network is ready to run (loaded and initialized)
Definition Network.C:165
std::vector< cv::Mat > process(std::vector< cv::Mat > const &blobs, std::vector< std::string > &info)
Process input blobs and obtain output blobs.
Definition Network.C:201
void onParamChange(network::outtransform const &param, std::string const &val) override
Definition Network.C:40
void waitBeforeDestroy()
If network is currently loading, wait until that is done before destroying.
Definition Network.C:152
virtual ~Network()
Destructor.
Definition Network.C:28
virtual void freeze(bool doit)
Freeze/unfreeze parameters that users should not change while running.
Definition Network.C:32
#define LFATAL(msg)
Convenience macro for users to print out console or syslog messages, FATAL level.
Definition Log.H:230
void warnAndRethrowException(std::string const &prefix="")
Convenience function to catch an exception, issue some LERROR (depending on type),...
Definition Log.C:203
#define LINFO(msg)
Convenience macro for users to print out console or syslog messages, INFO level.
Definition Log.H:194
std::vector< cv::Mat > split(cv::Mat const &tensor, int axis, std::vector< int > const &sizes)
Split a tensor into several, along a given axis.
Definition Utils.C:980
cv::Mat attrmat(vsi_nn_tensor_attr_t const &attr, void *dataptr=nullptr)
Construct a cv::Mat from attr and possibly data pointer.
Definition Utils.C:495
cv::Mat concatenate(std::vector< cv::Mat > const &tensors, int axis)
Concatenate several tensors into one.
Definition Utils.C:920
std::string shapestr(cv::Mat const &m)
Get a string of the form: "nD AxBxC... TYPE" from an n-dimensional cv::Mat with data type TYPE.
Definition Utils.C:109
std::vector< size_t > strshape(std::string const &str)
Get a vector of size_t from a string containing AxBxC...
Definition Utils.C:301
std::vector< T > joinall(std::vector< std::future< T > > &fvec, bool multiline=true)
Collect results from several async threads that are all returning a T result.
std::future< std::invoke_result_t< std::decay_t< Function >, std::decay_t< Args >... > > async(Function &&f, Args &&... args)
Async execution using a thread pool.
std::string join(std::vector< T > const &tokens, std::string const &delimiter)
Concatenate a vector of tokens into a string.
std::vector< std::string > split(std::string const &input, std::string const &regex="\\s+")
Split string into vector of tokens using a regex to specify what to split on; default regex splits by...
Definition Utils.C:270