if (val.empty()) return;

// Split the sequence of operations on semicolons:
std::vector<std::string> ops = jevois::split(val, "\\s*;\\s*");

// Parse each operation in turn:
for (std::string const & op : ops)
{
  Oper o; bool syntax_error = true;
  std::vector<std::string> tok = jevois::split(op, "(\\s*\\(\\s*|\\s*,\\s*|\\s*\\)\\s*)");
  // ----------------------------------------------------------------------------------------------------
  if (tok[0] == "shape")
  {
    if (tok.size() == 3)
    {
      o.op = Operator::Shape;
      o.tnum.emplace_back(std::stoul(tok[1]));
      std::vector<size_t> newshape = jevois::dnn::strshape(tok[2]);
      for (size_t v : newshape) o.newvals.emplace_back(int(v));
      syntax_error = false;
    }
    if (syntax_error) LFATAL("Syntax error, expected: shape(outnum, AxBxC...)");
  }
  // ----------------------------------------------------------------------------------------------------
  else if (tok[0] == "transpose")
  {
    if (tok.size() > 2)
    {
      o.op = Operator::Transpose;
      if (tok[1] == "*") o.tnum.emplace_back(std::numeric_limits<size_t>::max()); // "*" selects all outputs (sentinel value assumed)
      else o.tnum.emplace_back(std::stoul(tok[1]));
      for (size_t i = 2; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
      syntax_error = false;
    }
    if (syntax_error) LFATAL("Syntax error, expected: transpose(outnum, oldaxisA, oldaxisB, ...), where "
                             "transposed new axis 0 (the outermost dimension, typically batch size) will be "
                             "from oldaxisA, new axis 1 from oldaxisB, etc");
  }
  // ----------------------------------------------------------------------------------------------------
  else if (tok[0] == "order")
  {
    if (tok.size() > 1)
    {
      o.op = Operator::Order;
      for (size_t i = 1; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
      syntax_error = false;
    }
    if (syntax_error) LFATAL("Syntax error, expected: order(oldidx0, oldidx1, ...), where the new order will be "
                             "new tensor 0: old tensor oldidx0 (which is zero-based); new tensor 1: "
                             "old tensor oldidx1, etc. It is ok to have duplicated or missing entries.");
  }
  // ----------------------------------------------------------------------------------------------------
  else if (tok[0] == "split")
  {
    if (tok.size() > 3)
    {
      o.op = Operator::Split;
      if (tok[1] == "*") o.tnum.emplace_back(std::numeric_limits<size_t>::max()); // "*" selects all outputs (sentinel value assumed)
      else o.tnum.emplace_back(std::stoul(tok[1]));
      o.tnum.emplace_back(std::stoul(tok[2]));
      for (size_t i = 3; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
      syntax_error = false;
    }
    if (syntax_error) LFATAL("Syntax error, expected: split(outnum, axis, newsize1, ..., newsizeN), where "
                             "axis 0 is the outermost dimension (typically, batch size), and newsize1 + ... "
                             "+ newsizeN must be equal to the original size of that axis.");
  }
  // ----------------------------------------------------------------------------------------------------
  else if (tok[0] == "merge")
  {
    if (tok.size() > 2)
    {
      o.op = Operator::Merge;
      o.tnum.emplace_back(std::stoul(tok[1]));
      for (size_t i = 2; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
      syntax_error = false;

      // Outputs to merge must be given in ascending order:
      for (size_t i = 0; i < o.newvals.size() - 1; ++i)
        if (o.newvals[i] > o.newvals[i+1]) { syntax_error = true; break; }
    }
    if (syntax_error) LFATAL("Syntax error, expected: merge(axis, outnum1, ..., outnumN), where "
                             "axis 0 is the outermost dimension (typically, batch size) and outnum1, ..., "
                             "outnumN are the outputs to merge along that axis. All the outputs to be merged "
                             "must have matching number of dimensions, and matching sizes on all other axes. "
                             "The merged tensor will replace the first output listed in the merge, and the others "
                             "listed will be removed. Outputs to merge must be listed in ascending order (use "
                             "an order() transform first if needed)");
  }
  else LFATAL("Syntax error: Unrecognized operation: " << op);

  itsOps.emplace_back(o);
}
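
// Illustrative example (values hypothetical, not from this file): a complete transform string
// chains operations with semicolons, e.g.
//   outtransform = "shape(0, 1x5x8400); transpose(0, 0,2,1); order(1,0)"
// which reshapes out 0, then permutes its axes, then swaps the first two outputs.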
// ####################################################################################################
// Set extra input tensor number 'num' from a given cv::Mat 'in':
size_t const numin = inputShapes().size();
if (num >= numin)
  LFATAL("Cannot set input " << num << ": network only has " << numin << " inputs");

std::vector<std::string> extrains = jevois::split(extraintensors::get(), ",\\s*");
size_t const numextra = extrains.size();
if (numextra > numin)
  LFATAL(numextra << " extra inputs specified, but net only has " << numin << " total inputs");
if (num + numextra < numin)
  LFATAL("Cannot set input " << num << " (net has " << numin << " inputs, including " <<
         numextra << " extra ones)");

// Find the spec that corresponds to extra input 'num':
std::string const & ein = extrains[num + numextra - numin];
std::vector<std::string> tok = jevois::split(ein, ":");
if (tok.size() != 3) LFATAL("Malformed extra tensor, need <type>:<shape>:external");

// Convert the given matrix to the declared tensor type:
cv::Mat cvtin;
if (tok[0] == "8U") in.convertTo(cvtin, CV_8U);
else if (tok[0] == "8S") in.convertTo(cvtin, CV_8S);
else if (tok[0] == "16U") in.convertTo(cvtin, CV_16U);
else if (tok[0] == "16S") in.convertTo(cvtin, CV_16S);
else if (tok[0] == "16F") in.convertTo(cvtin, CV_16F);
else if (tok[0] == "32S") in.convertTo(cvtin, CV_32S);
else if (tok[0] == "32F") cvtin = in;
else if (tok[0] == "64F") in.convertTo(cvtin, CV_64F);
else throw std::range_error("Unsupported extra input tensor type [" + tok[0] + "] in " + ein);

setExtraInput(num, cvtin);
}
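
// Illustrative use of the conversion above (spec hypothetical): with
// extraintensors = "32F:1x10:external", a caller may pass a cv::Mat 'in' of any supported
// depth; it is converted to the declared type (here CV_32F) and stored via setExtraInput()
// for use by process() below.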
// ####################################################################################################
std::vector<cv::Mat> jevois::dnn::Network::process(std::vector<cv::Mat> const & blobs,
                                                   std::vector<std::string> & info)
{
  if (ready() == false) LFATAL("Network is not ready");
  std::vector<cv::Mat> outs;
  std::string const c = comment::get();

  // Add any extra input tensors?
  std::string const extra = extraintensors::get();
  if (extra.empty() == false)
  {
    std::lock_guard<std::mutex> _(itsExtraInputsMtx);

    jevois::Timer eitimer("Extra inputs", 100, LOG_DEBUG); eitimer.start(); // timer name/params assumed

    std::vector<cv::Mat> newblobs = blobs;

    std::vector<std::string> ins = jevois::split(extra, ",\\s*");
    for (std::string const & in : ins)
    {
      vsi_nn_tensor_attr_t attr; memset(&attr, 0, sizeof(attr));

      std::vector<std::string> tok = jevois::split(in, ":");
      if (tok.size() != 3)
        LFATAL("Malformed extra tensor, need <type>:<shape>:val1 val2 ... valN (separate multiple tensors by comma)");
      if (tok[0] == "8U") attr.dtype.vx_type = VSI_NN_TYPE_UINT8;
      else if (tok[0] == "8S") attr.dtype.vx_type = VSI_NN_TYPE_INT8;
      else if (tok[0] == "16U") attr.dtype.vx_type = VSI_NN_TYPE_UINT16;
      else if (tok[0] == "16S") attr.dtype.vx_type = VSI_NN_TYPE_INT16;
      else if (tok[0] == "16F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT16;
      else if (tok[0] == "32S") attr.dtype.vx_type = VSI_NN_TYPE_INT32;
      else if (tok[0] == "32F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT32;
      else if (tok[0] == "64F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT64;
      else throw std::range_error("Unsupported extra input tensor type [" + tok[0] + "] in " + extra);
      // Decode the shape; note that VSI NN stores dimensions in reverse order:
      std::vector<size_t> dims = jevois::dnn::strshape(tok[1]);
      attr.dim_num = dims.size();
      for (size_t i = 0; i < attr.dim_num; ++i) attr.size[attr.dim_num - 1 - i] = dims[i];

      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_NONE;
      attr.dtype.fmt = VSI_NN_DIM_FMT_AUTO;

      // Allocate a cv::Mat for the tensor data:
      cv::Mat b = jevois::dnn::attrmat(attr); // helper assumed from jevois::dnn
      // Values to be provided at runtime via setExtraInput()?
      if (tok[2] == "external")
      {
        auto itr = itsExtraInputs.find(newblobs.size());
        if (itr == itsExtraInputs.end()) newblobs.emplace_back(std::move(b));
        else newblobs.push_back(itr->second);
      }
      else
      {
        // Constant values given inline in the parameter string:
        std::vector<std::string> vals = jevois::split(tok[2], "\\s+");
        size_t const nvals = vals.size();
        if (nvals != b.total())
          LFATAL("Extra in tensor needs " << b.total() << " values, but " << nvals <<
                 " given in [" << in << ']');
        switch (attr.dtype.vx_type)
        {
        case VSI_NN_TYPE_UINT8:
        {
          uint8_t * ptr = reinterpret_cast<uint8_t *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stoi(v);
        }
        break;

        case VSI_NN_TYPE_INT8:
        {
          int8_t * ptr = reinterpret_cast<int8_t *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stoi(v);
        }
        break;

        case VSI_NN_TYPE_UINT16:
        {
          uint16_t * ptr = reinterpret_cast<uint16_t *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stoi(v);
        }
        break;

        case VSI_NN_TYPE_INT16:
        {
          int16_t * ptr = reinterpret_cast<int16_t *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stoi(v);
        }
        break;

        case VSI_NN_TYPE_FLOAT16:
        {
          cv::hfloat * ptr = reinterpret_cast<cv::hfloat *>(b.data);
          for (std::string const & v : vals) *ptr++ = cv::hfloat(std::stof(v));
        }
        break;

        case VSI_NN_TYPE_INT32:
        {
          int32_t * ptr = reinterpret_cast<int32_t *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stoi(v);
        }
        break;

        case VSI_NN_TYPE_FLOAT32:
        {
          float * ptr = reinterpret_cast<float *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stof(v);
        }
        break;

        case VSI_NN_TYPE_FLOAT64:
        {
          double * ptr = reinterpret_cast<double *>(b.data);
          for (std::string const & v : vals) *ptr++ = std::stod(v);
        }
        break;

        default: LFATAL("internal inconsistency");
        }

        newblobs.emplace_back(std::move(b));
      }
    }
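
    // Illustrative examples (values hypothetical): "32F:1x4:0.5 0.25 0.1 0.9" declares one
    // constant 1x4 float32 tensor whose values are parsed by the loop above, while
    // "32F:1x10:external, 8U:2x2:external" declares two tensors to be filled at runtime
    // through setExtraInput().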
    info.emplace_back("* Input Tensors");
    for (cv::Mat const & b : newblobs) info.emplace_back(jevois::dnn::shapestr(b));
    info.emplace_back(eitimer.stop());

    info.emplace_back("* Network");
    if (c.empty() == false) info.emplace_back(c);

    outs = doprocess(newblobs, info);
  }
  else
  {
    info.emplace_back("* Input Tensors");
    for (cv::Mat const & b : blobs) info.emplace_back(jevois::dnn::shapestr(b));

    info.emplace_back("* Network");
    if (c.empty() == false) info.emplace_back(c);

    outs = doprocess(blobs, info);
  }
  info.emplace_back("* Output Tensors");
  for (cv::Mat const & b : outs) info.emplace_back(jevois::dnn::shapestr(b));

  // Apply any output transforms:
  if (itsOps.empty() == false)
  {
    jevois::Timer tftimer("Transforms", 100, LOG_DEBUG); tftimer.start(); // timer name/params assumed

    info.emplace_back("* Output Tensors Transforms");
    for (Oper const & o : itsOps)
      switch (o.op)
      {
      // ----------------------------------------------------------------------------------------------------
      case Operator::Shape:
      {
        size_t const tnum = o.tnum[0];
        try
        {
          outs[tnum] = outs[tnum].reshape(1, o.newvals);
          info.emplace_back("- shape out " + std::to_string(tnum) + " to " + jevois::dnn::shapestr(outs[tnum]));
        }
        catch (...)
        {
          LFATAL("While attempting output transform 'shape(" << tnum << ", " <<
                 jevois::join(o.newvals, ", ") << ")': Cannot reshape from " <<
                 jevois::dnn::shapestr(outs[tnum]) << " to desired dims because of total number of elements mismatch");
        }
      }
      break;
      // ----------------------------------------------------------------------------------------------------
      case Operator::Transpose:
      {
        size_t tnum = o.tnum[0];

        if (tnum == std::numeric_limits<size_t>::max()) // "*" was given: transpose all outputs, in parallel
        {
          std::vector<std::future<void>> fvec;
          for (size_t t = 0; t < outs.size(); ++t)
            fvec.emplace_back(jevois::async([&outs, &o](size_t t)
            {
              try
              {
                cv::Mat newout; cv::transposeND(outs[t], o.newvals, newout); outs[t] = std::move(newout);
              }
              catch (...)
              {
                LFATAL("While attempting output transform 'transpose(" << t << ", " <<
                       jevois::join(o.newvals, ", ") << ")': Cannot transpose " << jevois::dnn::shapestr(outs[t]) <<
                       " to desired shape, check number of dimensions and that the desired axes contain every "
                       "source axis number exactly once.");
              }
            }, t));
          jevois::joinall(fvec);
          info.emplace_back("- transposed all outs");
        }
        else
        {
          try
          {
            cv::Mat newout; cv::transposeND(outs[tnum], o.newvals, newout); outs[tnum] = std::move(newout);
            info.emplace_back("- transpose out " + std::to_string(tnum) + " to " + jevois::dnn::shapestr(outs[tnum]));
          }
          catch (...)
          {
            LFATAL("While attempting output transform 'transpose(" << tnum << ", " <<
                   jevois::join(o.newvals, ", ") << ")': Cannot transpose " << jevois::dnn::shapestr(outs[tnum]) <<
                   " to desired shape, check number of dimensions and that the desired axes contain every source axis "
                   "number exactly once.");
          }
        }
      }
      break;
      // ----------------------------------------------------------------------------------------------------
      case Operator::Order:
      {
        std::vector<cv::Mat> newouts; int const osiz = int(outs.size());

        for (int idx : o.newvals)
          if (idx >= 0 && idx < osiz)
            newouts.push_back(outs[idx]);
          else
            LFATAL("While attempting output transform 'order(" << jevois::join(o.newvals, ", ") <<
                   ")': Output number " << idx << " does not exist");

        info.emplace_back("- order: " + jevois::join(o.newvals, ", "));
        outs = std::move(newouts);
      }
      break;
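
      // For illustration (setup hypothetical): with three outputs, "order(2, 0, 0)" yields
      // new outs = { old out 2, old out 0, old out 0 }; duplicated or missing entries are allowed.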
      // ----------------------------------------------------------------------------------------------------
      case Operator::Split:
      {
        size_t const tnum = o.tnum[0];
        size_t const axis = o.tnum[1];

        std::vector<cv::Mat> newouts;
        for (size_t i = 0; i < outs.size(); ++i)
          if (i == tnum || tnum == std::numeric_limits<size_t>::max())
          {
            try
            {
              // Split outs[i] along the given axis into chunks of the sizes in o.newvals:
              std::vector<cv::Mat> mats = jevois::dnn::split(outs[i], axis, o.newvals); // helper assumed from jevois::dnn

              std::string inf = "- split out " + std::to_string(i) + " to ";
              for (cv::Mat & m : mats)
              {
                inf += jevois::dnn::shapestr(m) + ", ";
                newouts.emplace_back(m);
              }
              info.emplace_back(inf.substr(0, inf.length() - 2));
            }
            catch (std::exception const & e)
            { LFATAL("While attempting output transform 'split(" << i << ", " << axis << ", " <<
                     jevois::join(o.newvals, ", ") << ")': " << e.what()); }
            catch (...)
            { LFATAL("While attempting output transform 'split(" << i << ", " << axis << ", " <<
                     jevois::join(o.newvals, ", ") << ")': unknown error"); }
          }
          else newouts.push_back(outs[i]);

        outs = std::move(newouts);
      }
      break;
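
      // For illustration (shapes hypothetical): "split(0, 1, 2, 4)" applied to an out 0 of shape
      // 1x6x85 yields two outputs of shapes 1x2x85 and 1x4x85; the new sizes (2+4) must sum to
      // the original size (6) of the split axis.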
      // ----------------------------------------------------------------------------------------------------
      case Operator::Merge:
      {
        size_t const axis = o.tnum[0]; size_t const numouts = outs.size();
        std::vector<cv::Mat> newouts;

        // Decide what to do with each current output: 0 = keep as-is; 1 = replace by the merged
        // tensor; 2 = drop (it was merged into the output tagged 1):
        std::vector<int> action(numouts, 0); bool didmerge = false; std::vector<cv::Mat> tomerge;
        for (int i = 0; i < int(numouts); ++i)
        {
          if (outs[i].type() != CV_32F && outs[i].type() != CV_64F && outs[i].type() != CV_16F)
            LFATAL("While attempting output transform 'merge(" << axis << ", " <<
                   jevois::join(o.newvals, ", ") << ")': Cannot merge quantized tensors");

          bool inmerge = false; for (int j : o.newvals) if (i == j) { inmerge = true; break; }
          if (inmerge)
          {
            tomerge.push_back(outs[i]);
            if (didmerge == false) { action[i] = 1; didmerge = true; } else action[i] = 2;
          }
        }

        try
        {
          for (size_t i = 0; i < numouts; ++i)
            switch (action[i])
            {
            case 0: newouts.push_back(outs[i]); break;

            case 1:
              // Concatenate all the tensors to merge, along the given axis:
              newouts.emplace_back(jevois::dnn::concatenate(tomerge, axis)); // helper assumed from jevois::dnn
              info.emplace_back("- merged outs " + jevois::join(o.newvals, ", ") + " into " +
                                jevois::dnn::shapestr(newouts.back()) + " (new out " +
                                std::to_string(newouts.size()-1) + ')');
              break;

            case 2: break; // skip outputs that were merged into the first listed one

            default: LFATAL("Internal inconsistency in merge() transform");
            }
        }
        catch (std::exception const & e)
        { LFATAL("While attempting output transform 'merge(" << axis << ", " <<
                 jevois::join(o.newvals, ", ") << ")': " << e.what()); }
        catch (...)
        { LFATAL("While attempting output transform 'merge(" << axis << ", " <<
                 jevois::join(o.newvals, ", ") << ")': unknown error"); }

        outs = std::move(newouts);
      }
      break;
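
      // For illustration (shapes hypothetical): "merge(1, 1, 2)" with out 1 of shape 1x2x85 and
      // out 2 of shape 1x4x85 yields a single 1x6x85 tensor that replaces out 1; out 2 is
      // removed, and all other axes must match across the merged outputs.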
612 LFATAL(
"Internal error: Unsupported output transform op " <<
int(
o.op));
615 info.emplace_back(tftimer.
stop());
618 info.emplace_back(
"* Transformed Output Tensors");