  if (val.empty()) return;

  // Split the sequence of operations:
  std::vector<std::string> ops = jevois::split(val, "\\s*;\\s*");

  // Decode each operation and convert it to an Oper:
  for (std::string const & op : ops)
  {
    Oper o; bool syntax_error = true;
    std::vector<std::string> tok = jevois::split(op, "(\\s*\\(\\s*|\\s*,\\s*|\\s*\\)\\s*)");
    // ----------------------------------------------------------------------------------------------------
    if (tok[0] == "shape")
    {
      if (tok.size() == 3)
      {
        o.op = Operator::Shape;
        o.tnum.emplace_back(std::stoul(tok[1]));
        std::vector<size_t> newshape = jevois::dnn::strshape(tok[2]); // parse "AxBxC..." into dims
        for (size_t v : newshape) o.newvals.emplace_back(int(v));
        syntax_error = false;
      }
      if (syntax_error) LFATAL("Syntax error, expected: shape(outnum, AxBxC...)");
    }
    // ----------------------------------------------------------------------------------------------------
    else if (tok[0] == "transpose")
    {
      if (tok.size() > 2)
      {
        o.op = Operator::Transpose;
        // "*" selects all output tensors; encoded here as a max sentinel (reconstructed assumption):
        if (tok[1] == "*") o.tnum.emplace_back(std::numeric_limits<size_t>::max());
        else o.tnum.emplace_back(std::stoul(tok[1]));
        for (size_t i = 2; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
        syntax_error = false;
      }
      if (syntax_error)
        LFATAL("Syntax error, expected: transpose(outnum, oldaxisA, oldaxisB, ...), where "
               "transposed new axis 0 (the outermost dimension, typically batch size) will be "
               "from oldaxisA, new axis 1 from oldaxisB, etc");
    }
    // ----------------------------------------------------------------------------------------------------
    else if (tok[0] == "order")
    {
      if (tok.size() > 1)
      {
        o.op = Operator::Order;
        for (size_t i = 1; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
        syntax_error = false;
      }
      if (syntax_error)
        LFATAL("Syntax error, expected: order(oldidx0, oldidx1, ...), where the new order will be "
               "new tensor 0: old tensor oldidx0 (which is zero-based); new tensor 1: "
               "old tensor oldidx1, etc. It is ok to have duplicated or missing entries.");
    }
    // ----------------------------------------------------------------------------------------------------
    else if (tok[0] == "split")
    {
      if (tok.size() > 3)
      {
        o.op = Operator::Split;
        // "*" selects all output tensors; encoded here as a max sentinel (reconstructed assumption):
        if (tok[1] == "*") o.tnum.emplace_back(std::numeric_limits<size_t>::max());
        else o.tnum.emplace_back(std::stoul(tok[1]));
        o.tnum.emplace_back(std::stoul(tok[2]));
        for (size_t i = 3; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
        syntax_error = false;
      }
      if (syntax_error)
        LFATAL("Syntax error, expected: split(outnum, axis, newsize1, ..., newsizeN), where "
               "axis 0 is the outermost dimension (typically, batch size), and newsize1 + ... "
               "+ newsizeN must be equal to the original size of that axis.");
    }
    // ----------------------------------------------------------------------------------------------------
    else if (tok[0] == "merge")
    {
      if (tok.size() > 2)
      {
        o.op = Operator::Merge;
        o.tnum.emplace_back(std::stoul(tok[1]));
        for (size_t i = 2; i < tok.size(); ++i) o.newvals.emplace_back(int(std::stoul(tok[i])));
        syntax_error = false;

        // Outputs to merge must be listed in ascending order:
        for (size_t i = 0; i < o.newvals.size() - 1; ++i)
          if (o.newvals[i] > o.newvals[i+1]) { syntax_error = true; break; }
      }
      if (syntax_error)
        LFATAL("Syntax error, expected: merge(axis, outnum1, ..., outnumN), where "
               "axis 0 is the outermost dimension (typically, batch size) and outnum1, ..., "
               "outnumN are the outputs to merge along that axis. All the outputs to be merged "
               "must have matching number of dimensions, and matching sizes on all other axes. "
               "The merged tensor will replace the first output listed in the merge, and the others "
               "listed will be removed. Outputs to merge must be listed in ascending order (use "
               "an order() transform first if needed)");
    }
    // ----------------------------------------------------------------------------------------------------
    else LFATAL("Syntax error: Unrecognized operation: " << op);

    itsOps.emplace_back(o);
  }
}
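// For reference, a complete outtransform value chains several operations with semicolons, applied
// in the order given, e.g. (hypothetical values): "order(1,0); transpose(0, 0,2,1); split(0, 1, 2,2)"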
// ####################################################################################################
std::vector<cv::Mat> jevois::dnn::Network::process(std::vector<cv::Mat> const & blobs,
                                                   std::vector<std::string> & info)
{
  if (ready() == false) LFATAL("Network is not ready");
  std::vector<cv::Mat> outs;
  std::string const c = comment::get();

  // Add any extra input tensors?
  std::string const extra = extraintensors::get();
  if (extra.empty() == false)
  {
    jevois::Timer eitimer("Extra input tensors"); eitimer.start(); // timer name is a reconstruction
    std::vector<cv::Mat> newblobs = blobs;

    std::vector<std::string> ins = jevois::split(extra, ",\\s*");
    for (std::string const & in : ins)
    {
      vsi_nn_tensor_attr_t attr; memset(&attr, 0, sizeof(attr));

      // Each extra tensor is specified as <type>:<shape>:values
      std::vector<std::string> tok = jevois::split(in, ":");
      if (tok.size() != 3)
        LFATAL("Malformed extra tensor, need <type>:<shape>:val1 val2 ... valN "
               "(separate multiple tensors by comma)");
      // Decode the data type into a VSI type:
      if (tok[0] == "8U") attr.dtype.vx_type = VSI_NN_TYPE_UINT8;
      else if (tok[0] == "8S") attr.dtype.vx_type = VSI_NN_TYPE_INT8;
      else if (tok[0] == "16U") attr.dtype.vx_type = VSI_NN_TYPE_UINT16;
      else if (tok[0] == "16S") attr.dtype.vx_type = VSI_NN_TYPE_INT16;
      else if (tok[0] == "16F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT16;
      else if (tok[0] == "32S") attr.dtype.vx_type = VSI_NN_TYPE_INT32;
      else if (tok[0] == "32F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT32;
      else if (tok[0] == "64F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT64;
      else throw std::range_error("Unsupported extra input tensor type [" + tok[0] + "] in " + extra);
      // Decode the shape; VSI stores sizes with the innermost (fastest-varying) dimension first,
      // so reverse the parsed dims:
      std::vector<size_t> dims = jevois::dnn::strshape(tok[1]);
      attr.dim_num = dims.size();
      for (size_t i = 0; i < attr.dim_num; ++i) attr.size[attr.dim_num - 1 - i] = dims[i];
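      // Worked example: a parsed shape of 1x3x4 gives attr.size[0] = 4, attr.size[1] = 3,
      // attr.size[2] = 1.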
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_NONE;
      attr.dtype.fmt = VSI_NN_DIM_FMT_AUTO;
      // Allocate tensor storage matching attr (attrmat helper assumed; allocation was elided in source):
      cv::Mat b = jevois::dnn::attrmat(attr);

      // Populate the values:
      std::vector<std::string> vals = jevois::split(tok[2], "\\s+");
      size_t const nvals = vals.size();
      if (nvals != b.total())
        LFATAL("Extra in tensor needs " << b.total() << " values, but " << nvals << " given in [" << in << ']');
      switch (attr.dtype.vx_type)
      {
      case VSI_NN_TYPE_UINT8:
      {
        uint8_t * ptr = reinterpret_cast<uint8_t *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stoi(v);
      }
      break;

      case VSI_NN_TYPE_INT8:
      {
        int8_t * ptr = reinterpret_cast<int8_t *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stoi(v);
      }
      break;

      case VSI_NN_TYPE_UINT16:
      {
        uint16_t * ptr = reinterpret_cast<uint16_t *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stoi(v);
      }
      break;

      case VSI_NN_TYPE_INT16:
      {
        int16_t * ptr = reinterpret_cast<int16_t *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stoi(v);
      }
      break;

      case VSI_NN_TYPE_FLOAT16:
      {
        cv::hfloat * ptr = reinterpret_cast<cv::hfloat *>(b.data);
        for (std::string const & v : vals) *ptr++ = cv::hfloat(std::stof(v));
      }
      break;

      case VSI_NN_TYPE_INT32:
      {
        int32_t * ptr = reinterpret_cast<int32_t *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stoi(v);
      }
      break;

      case VSI_NN_TYPE_FLOAT32:
      {
        float * ptr = reinterpret_cast<float *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stof(v);
      }
      break;

      case VSI_NN_TYPE_FLOAT64:
      {
        double * ptr = reinterpret_cast<double *>(b.data);
        for (std::string const & v : vals) *ptr++ = std::stod(v);
      }
      break;

      default: LFATAL("internal inconsistency");
      }
      newblobs.emplace_back(std::move(b));
    }
    // Show info about the input tensors, now including the extra ones:
    info.emplace_back("* Input Tensors");
    for (cv::Mat const & b : newblobs) info.emplace_back(jevois::dnn::shapestr(b));
    info.emplace_back(eitimer.stop());

    info.emplace_back("* Network");
    if (c.empty() == false) info.emplace_back(c);

    outs = doprocess(newblobs, info);
  }
  else
  {
    info.emplace_back("* Input Tensors");
    for (cv::Mat const & b : blobs) info.emplace_back(jevois::dnn::shapestr(b));

    info.emplace_back("* Network");
    if (c.empty() == false) info.emplace_back(c);

    outs = doprocess(blobs, info);
  }
  // Show info about the raw output tensors:
  info.emplace_back("* Output Tensors");
  for (cv::Mat const & m : outs) info.emplace_back(jevois::dnn::shapestr(m));
  // Possibly apply a sequence of transforms to the outputs:
  if (itsOps.empty() == false)
  {
    jevois::Timer tftimer("Output transforms"); tftimer.start(); // timer name is a reconstruction
    info.emplace_back("* Output Tensors Transforms");

    for (Oper const & o : itsOps)
      switch (o.op)
      {
        // ----------------------------------------------------------------------------------------------------
      case Operator::Shape:
      {
        size_t const tnum = o.tnum[0];
        if (tnum >= outs.size())
          LFATAL("Cannot reshape output " << tnum << ": network only has " << outs.size() << " outputs");
        try
        {
          outs[tnum] = outs[tnum].reshape(1, o.newvals);
          info.emplace_back("- shape out " + std::to_string(tnum) + " to " + jevois::dnn::shapestr(outs[tnum]));
        }
        catch (...)
        {
          LFATAL("While attempting output transform 'shape(" << tnum << ", " << jevois::join(o.newvals, "x") <<
                 ")': cannot reshape from " << jevois::dnn::shapestr(outs[tnum]) <<
                 " to desired dims because of total number of elements mismatch");
        }
      }
      break;
        // ----------------------------------------------------------------------------------------------------
      case Operator::Transpose:
      {
        size_t tnum = o.tnum[0];

        if (tnum == std::numeric_limits<size_t>::max())
        {
          // Transpose all output tensors, in parallel (structure reconstructed around the futures
          // vector present in the source):
          std::vector<std::future<void>> fvec;
          for (size_t t = 0; t < outs.size(); ++t)
            fvec.emplace_back(jevois::async([&outs, &o, t]()
            {
              try
              {
                cv::Mat newout; cv::transposeND(outs[t], o.newvals, newout); outs[t] = std::move(newout);
              }
              catch (...)
              {
                LFATAL("While attempting output transform 'transpose(" << t << ", " <<
                       jevois::join(o.newvals, ", ") << ")': could not transpose " <<
                       jevois::dnn::shapestr(outs[t]) <<
                       " to desired shape, check number of dimensions and that the desired axes contain every "
                       "source axis number exactly once.");
              }
            }));
          for (auto & f : fvec) f.get();
          info.emplace_back("- transposed all outs");
        }
        else
        {
          try
          {
            cv::Mat newout; cv::transposeND(outs[tnum], o.newvals, newout); outs[tnum] = std::move(newout);
            info.emplace_back("- transpose out " + std::to_string(tnum) + " to " + jevois::dnn::shapestr(outs[tnum]));
          }
          catch (...)
          {
            LFATAL("While attempting output transform 'transpose(" << tnum << ", " <<
                   jevois::join(o.newvals, ", ") << ")': could not transpose " <<
                   jevois::dnn::shapestr(outs[tnum]) <<
                   " to desired shape, check number of dimensions and that the desired axes contain every source axis "
                   "number exactly once.");
          }
        }
      }
      break;
        // ----------------------------------------------------------------------------------------------------
      case Operator::Order:
      {
        std::vector<cv::Mat> newouts; int const osiz = int(outs.size());

        for (int idx : o.newvals)
          if (idx >= 0 && idx < osiz)
            newouts.push_back(outs[idx]);
          else
            LFATAL("While attempting output transform 'order(" << jevois::join(o.newvals, ", ") <<
                   ")': Output number " << idx << " does not exist");

        info.emplace_back("- order: " + jevois::join(o.newvals, ", "));
        outs = std::move(newouts);
      }
      break;
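      // Example: with 3 outputs and order values {2, 0}, the new list is {old out 2, old out 0};
      // old out 1 is dropped. Duplicates are allowed and just reference the same tensor data twice.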
        // ----------------------------------------------------------------------------------------------------
      case Operator::Split:
      {
        size_t const tnum = o.tnum[0];
        size_t const axis = o.tnum[1];

        std::vector<cv::Mat> newouts;
        for (size_t i = 0; i < outs.size(); ++i)
          if (i == tnum || tnum == std::numeric_limits<size_t>::max())
          {
            try
            {
              // Split along the given axis into chunks of the given sizes (jevois::dnn::split
              // assumed here; the exact call was elided in the source):
              std::vector<cv::Mat> mats = jevois::dnn::split(outs[i], axis, o.newvals);

              std::string inf = "- split out " + std::to_string(i) + " to ";
              for (cv::Mat & m : mats)
              {
                inf += jevois::dnn::shapestr(m) + ", ";
                newouts.emplace_back(m);
              }
              info.emplace_back(inf.substr(0, inf.length() - 2)); // strip trailing ", "
            }
            catch (std::exception const & e)
            {
              LFATAL("While attempting output transform 'split(" << i << ", " << axis << ", " <<
                     jevois::join(o.newvals, ", ") << ")': " << e.what());
            }
            catch (...)
            {
              LFATAL("While attempting output transform 'split(" << i << ", " << axis << ", " <<
                     jevois::join(o.newvals, ", ") << ")': unknown error");
            }
          }
          else newouts.push_back(outs[i]);

        outs = std::move(newouts);
      }
      break;
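      // Example: split(0, 1, 2,3) applied to a 1x5x80 output 0 replaces it with two outputs of
      // shapes 1x2x80 and 1x3x80; all other outputs pass through unchanged.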
        // ----------------------------------------------------------------------------------------------------
      case Operator::Merge:
      {
        size_t const axis = o.tnum[0]; size_t const numouts = outs.size();
        std::vector<cv::Mat> newouts;

        try
        {
          // First pass: decide what to do with each output: 0 = keep as-is, 1 = replace by the
          // merged tensor, 2 = drop (it was merged into an earlier output):
          std::vector<int> action(numouts); bool didmerge = false; std::vector<cv::Mat> tomerge;
          for (int i = 0; i < int(numouts); ++i)
          {
            if (outs[i].type() != CV_32F && outs[i].type() != CV_64F && outs[i].type() != CV_16F)
              LFATAL("While attempting output transform 'merge(" << axis << ", " <<
                     jevois::join(o.newvals, ", ") << ")': Cannot merge quantized tensors");

            bool inmerge = false;
            for (int j : o.newvals) if (i == j) { inmerge = true; break; }

            if (inmerge)
            {
              tomerge.push_back(outs[i]);
              if (didmerge == false) { action[i] = 1; didmerge = true; }
              else action[i] = 2;
            }
            else action[i] = 0;
          }

          // Second pass: assemble the new list of outputs:
          for (size_t i = 0; i < numouts; ++i)
            switch (action[i])
            {
            case 0: // keep this output unchanged
              newouts.push_back(outs[i]);
              break;

            case 1: // replace this output by the merged tensor (jevois::dnn::concatenate assumed
                    // here; the exact call was elided in the source)
              newouts.push_back(jevois::dnn::concatenate(tomerge, axis));
              info.emplace_back("- merged outs " + jevois::join(o.newvals, ", ") + " into " +
                                jevois::dnn::shapestr(newouts.back()) + " (new out " +
                                std::to_string(newouts.size()-1) + ')');
              break;

            case 2: // this output was merged into an earlier one; drop it
              break;

            default: LFATAL("Internal inconsistency in merge() transform");
            }
        }
        catch (std::exception const & e)
        {
          LFATAL("While attempting output transform 'merge(" << axis << ", " <<
                 jevois::join(o.newvals, ", ") << ")': " << e.what());
        }
        catch (...)
        {
          LFATAL("While attempting output transform 'merge(" << axis << ", " <<
                 jevois::join(o.newvals, ", ") << ")': unknown error");
        }

        outs = std::move(newouts);
      }
      break;
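      // Example: merge(1, 1,2) on outputs {1x4x85, 1x2x85, 1x3x85} concatenates outs 1 and 2 into
      // a 1x5x85 tensor, yielding {1x4x85, 1x5x85} with the merged tensor at index 1.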
        // ----------------------------------------------------------------------------------------------------
      default:
        LFATAL("Internal error: Unsupported output transform op " << int(o.op));
      }

    info.emplace_back(tftimer.stop());

    // Show info about the transformed output tensors:
    info.emplace_back("* Transformed Output Tensors");
    for (cv::Mat const & m : outs) info.emplace_back(jevois::dnn::shapestr(m));
  }

  return outs;
}