// Excerpt from the JeVois DNN utilities (namespace jevois::dnn). This assumes the usual
// JeVois headers (LINFO/LERROR/LFATAL logging, jevois::split), plus OpenCV, TensorFlow
// Lite, and the VeriSilicon VSI NN API, are all included above this point.

std::map<int, std::string> jevois::dnn::readLabelsFile(std::string const & fname)
{
  std::ifstream ifs(fname);
  if (ifs.is_open() == false) LFATAL("Failed to open file " << fname);

  size_t linenum = 1; std::map<int, std::string> ret; int id = 0;

  for (std::string line; std::getline(ifs, line); ++linenum)
  {
    // Skip blank lines and comment lines:
    size_t idx1 = line.find_first_not_of(" \t"); if (idx1 == line.npos) continue;
    size_t idx2 = line.find_last_not_of(" \t\r\n"); if (idx2 == line.npos) continue;
    if (line[idx1] == '#') continue;

    // Lines may optionally start with a numeric class ID, followed by separators:
    try { id = std::stoi(line, &idx1); idx1 = line.find_first_not_of("0123456789 \t,:", idx1); }
    catch (...) { }

    std::string classname;
    if (idx1 == line.npos)
    {
      LERROR(fname << ':' << linenum << ": empty class name -- REPLACING BY 'unspecified'");
      classname = "unspecified";
    }
    else classname = line.substr(idx1, idx2 + 1 - idx1);

    // Replace any doubled double-quotes by single ones:
    classname = std::regex_replace(classname, std::regex("\"\""), "\"");

    // Strip enclosing double-quotes, if any:
    size_t len = classname.length();
    if (len > 1 && classname[0] == '"' && classname[len-1] == '"') classname = classname.substr(1, len-2);

    // Store the name under its ID, then advance the ID for the next line:
    ret[id] = classname; ++id;
  }

  LINFO("Loaded " << ret.size() << " class names from " << fname);
  return ret;
}

std::string jevois::dnn::getLabel(std::map<int, std::string> const & labels, int id)
{
  auto itr = labels.find(id);
  if (itr == labels.end()) return std::to_string(id); // fall back to the numeric ID if we have no name for it
  return itr->second;
}
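
// Usage sketch (illustrative only; the file name and class index here are made up):
// after loading a one-class-per-line labels file, getLabel() resolves detector class
// indices to human-readable names, falling back to the numeric ID:
//
//   std::map<int, std::string> labels = jevois::dnn::readLabelsFile("coco-labels.txt");
//   LINFO("Detected a " << jevois::dnn::getLabel(labels, 17));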

int jevois::dnn::stringToRGBA(std::string const & label, unsigned char alpha)
{
  // Java-style 31x string hash, so that each label gets a stable pseudo-random color:
  int col = 0x80808080;
  for (char const c : label) col = c + ((col << 5) - col);
  col = (col & 0xffffff) | (alpha << 24);
  return col;
}

void jevois::dnn::topK(float const * pfProb, float * pfMaxProb, uint32_t * pMaxClass, uint32_t outputCount,
                       uint32_t topNum)
{
  // Fill the outputs with sentinel values: 0xfe bytes form a large negative float, and
  // 0xff bytes form an index that cannot match any valid class:
  memset(pfMaxProb, 0xfe, sizeof(float) * topNum);
  memset(pMaxClass, 0xff, sizeof(uint32_t) * topNum);

  for (uint32_t j = 0; j < topNum; ++j)
  {
    for (uint32_t i = 0; i < outputCount; ++i)
    {
      // Skip classes already selected in a previous round:
      uint32_t k;
      for (k = 0; k < topNum; ++k) if (i == pMaxClass[k]) break;
      if (k != topNum) continue;

      // Keep the best remaining probability for slot j:
      if (pfProb[i] > pfMaxProb[j]) { pfMaxProb[j] = pfProb[i]; pMaxClass[j] = i; }
    }
  }
}
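
// Usage sketch (illustrative; 'scores' stands for a hypothetical 1000-class raw output
// array): grab the 5 most probable classes and their scores:
//
//   float top5prob[5]; uint32_t top5class[5];
//   jevois::dnn::topK(scores, top5prob, top5class, 1000, 5);
//   for (int i = 0; i < 5; ++i) LINFO(top5class[i] << ": " << top5prob[i]);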

std::string jevois::dnn::shapestr(cv::Mat const & m)
{
  cv::MatSize const & ms = m.size; int const nd = ms.dims();
  std::string ret = std::to_string(nd) + "D ";
  for (int i = 0; i < nd; ++i) ret += std::to_string(ms[i]) + (i < nd-1 ? "x" : "");
  ret += " " + jevois::cvtypestr(m.type());
  return ret;
}

std::string jevois::dnn::shapestr(TfLiteTensor const * t)
{
  TfLiteIntArray const & dims = *t->dims;
  std::string ret = std::to_string(dims.size) + "D ";
  for (int i = 0; i < dims.size; ++i) ret += std::to_string(dims.data[i]) + (i < dims.size-1 ? "x" : "");

  // Append a short type name, following OpenCV-style conventions where possible:
  switch (t->type)
  {
    case kTfLiteNoType: ret += " NoType"; break;
    case kTfLiteFloat32: ret += " 32F"; break;
    case kTfLiteInt32: ret += " 32S"; break;
    case kTfLiteUInt8: ret += " 8U"; break;
    case kTfLiteInt64: ret += " 64S"; break;
    case kTfLiteString: ret += " String"; break;
    case kTfLiteBool: ret += " 8B"; break;
    case kTfLiteInt16: ret += " 16S"; break;
    case kTfLiteComplex64: ret += " 64C"; break;
    case kTfLiteInt8: ret += " 8I"; break;
    case kTfLiteFloat16: ret += " 16F"; break;
    case kTfLiteFloat64: ret += " 64F"; break;
    case kTfLiteComplex128: ret += " 128C"; break;
    default: ret += " UnknownType"; break;
  }
  return ret;
}

std::string jevois::dnn::shapestr(vsi_nn_tensor_attr_t const & attr)
{
  std::string ret = std::to_string(attr.dim_num) + "D ";

  // VSI stores dims in reverse order:
  for (uint32_t i = 0; i < attr.dim_num; ++i)
    ret += std::to_string(attr.size[attr.dim_num-1-i]) + (i < attr.dim_num-1 ? "x" : "");

  switch (attr.dtype.vx_type)
  {
    case VSI_NN_TYPE_UINT8: ret += " 8U"; break;
    case VSI_NN_TYPE_INT8: ret += " 8S"; break;
    case VSI_NN_TYPE_BOOL8: ret += " 8B"; break;
    case VSI_NN_TYPE_UINT16: ret += " 16U"; break;
    case VSI_NN_TYPE_INT16: ret += " 16S"; break;
    case VSI_NN_TYPE_FLOAT16: ret += " 16F"; break;
    case VSI_NN_TYPE_BFLOAT16: ret += " 16B"; break;
    case VSI_NN_TYPE_UINT32: ret += " 32U"; break;
    case VSI_NN_TYPE_INT32: ret += " 32S"; break;
    case VSI_NN_TYPE_FLOAT32: ret += " 32F"; break;
    case VSI_NN_TYPE_UINT64: ret += " 64U"; break;
    case VSI_NN_TYPE_INT64: ret += " 64S"; break;
    case VSI_NN_TYPE_FLOAT64: ret += " 64F"; break;
    default: throw std::range_error("shapestr: Unsupported tensor type " + std::to_string(attr.dtype.vx_type));
  }
  return ret;
}
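
// For example (illustrative values), an attr describing a 1x3x224x224 float32 tensor
// yields "4D 1x3x224x224 32F"; the cv::Mat and TfLiteTensor overloads above produce the
// same "<N>D <dims> <type>" layout, so shapes can be compared across the three APIs.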

std::vector<size_t> jevois::dnn::strshape(std::string const & str)
{
  std::vector<size_t> ret;
  std::vector<std::string> tok = jevois::split(str, "x");
  for (std::string const & t : tok) ret.emplace_back(std::stoi(t));
  return ret;
}
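
// For example, strshape("1x3x224x224") returns {1, 3, 224, 224}, parsing the
// "x"-separated dims format produced by the shapestr() variants above.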

int jevois::dnn::tf2cv(TfLiteType t)
{
  switch (t)
  {
    case kTfLiteFloat32: return CV_32F;
    case kTfLiteInt32: return CV_32S;
    case kTfLiteUInt8: return CV_8U;
    case kTfLiteInt16: return CV_16S;
    case kTfLiteInt8: return CV_8S;
    case kTfLiteFloat16: return CV_16S; // half floats are carried as raw 16-bit words here
    case kTfLiteFloat64: return CV_64F;
    default: LFATAL("Unsupported type " << TfLiteTypeGetName(t));
  }
}

int jevois::dnn::vsi2cv(vsi_nn_type_e t)
{
  switch (t)
  {
    case VSI_NN_TYPE_UINT8: return CV_8U;
    case VSI_NN_TYPE_INT8: return CV_8S;
    case VSI_NN_TYPE_BOOL8: return CV_8U;
    case VSI_NN_TYPE_UINT16: return CV_16U;
    case VSI_NN_TYPE_INT16: return CV_16S;
    case VSI_NN_TYPE_FLOAT16: return CV_16F;
    case VSI_NN_TYPE_BFLOAT16: return CV_16F; // approximate: OpenCV has no bfloat16 type
    case VSI_NN_TYPE_INT32: return CV_32S;
    case VSI_NN_TYPE_FLOAT32: return CV_32F;
    case VSI_NN_TYPE_FLOAT64: return CV_64F;
    default: throw std::range_error("vsi2cv: Unsupported tensor type " + std::to_string(t));
  }
}

vsi_nn_type_e jevois::dnn::tf2vsi(TfLiteType t)
{
  switch (t)
  {
    case kTfLiteFloat32: return VSI_NN_TYPE_FLOAT32;
    case kTfLiteInt32: return VSI_NN_TYPE_INT32;
    case kTfLiteUInt8: return VSI_NN_TYPE_UINT8;
    case kTfLiteInt16: return VSI_NN_TYPE_INT16;
    case kTfLiteInt8: return VSI_NN_TYPE_INT8;
    case kTfLiteFloat16: return VSI_NN_TYPE_FLOAT16;
    case kTfLiteFloat64: return VSI_NN_TYPE_FLOAT64;
    case kTfLiteInt64: return VSI_NN_TYPE_INT64;
    case kTfLiteBool: return VSI_NN_TYPE_BOOL8;
    case kTfLiteNoType: return VSI_NN_TYPE_NONE;
    default: LFATAL("Unsupported type " << TfLiteTypeGetName(t));
  }
}

void jevois::dnn::clamp(cv::Rect & r, int width, int height)
{
  // Clamp both corners into [0, width-1] x [0, height-1], then rebuild the rect:
  int tx = std::min(width - 1, std::max(0, r.x));
  int ty = std::min(height - 1, std::max(0, r.y));
  int bx = std::min(width - 1, std::max(0, r.x + r.width));
  int by = std::min(height - 1, std::max(0, r.y + r.height));
  r.x = tx; r.y = ty; r.width = bx - tx; r.height = by - ty;
}
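
// Usage sketch (illustrative values): a detection box that spills out of a 640x480
// image is clipped in place to the valid pixel area:
//
//   cv::Rect box(600, -20, 100, 100);
//   jevois::dnn::clamp(box, 640, 480);  // box is now (600, 0, 39, 80)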

std::vector<vsi_nn_tensor_attr_t> jevois::dnn::parseTensorSpecs(std::string const & specs)
{
  char const * const specdef = "[NCHW:|NHWC:|NA:|AUTO:]Type:[NxCxHxW|NxHxWxC|...][:QNT[:fl|:scale:zero]]";
  std::vector<std::string> spectok = jevois::split(specs, ",\\s*");
  std::vector<vsi_nn_tensor_attr_t> ret;

  for (std::string const & spec : spectok)
  {
    vsi_nn_tensor_attr_t attr; memset(&attr, 0, sizeof(attr));

    std::vector<std::string> tok = jevois::split(spec, ":");
    if (tok.size() < 2) throw std::runtime_error("parseTensorSpecs: Malformed tensor spec ["+spec+"] not "+specdef);

    // Decode the optional dimension ordering; n indexes the next token to parse:
    size_t n = 0;
    if (tok[0] == "NCHW") { ++n; attr.dtype.fmt = VSI_NN_DIM_FMT_NCHW; }
    else if (tok[0] == "NHWC") { ++n; attr.dtype.fmt = VSI_NN_DIM_FMT_NHWC; }
    else if (tok[0] == "NA") { ++n; attr.dtype.fmt = VSI_NN_DIM_FMT_NA; }
    else if (tok[0] == "AUTO") { ++n; attr.dtype.fmt = VSI_NN_DIM_FMT_AUTO; }
    else attr.dtype.fmt = VSI_NN_DIM_FMT_AUTO;

    // We need at least a type and dims:
    if (tok.size() < n+2) throw std::runtime_error("parseTensorSpecs: Malformed tensor spec ["+spec+"] not "+specdef);

    // Decode the tensor element type:
    if (tok[n] == "8U") attr.dtype.vx_type = VSI_NN_TYPE_UINT8;
    else if (tok[n] == "8S") attr.dtype.vx_type = VSI_NN_TYPE_INT8;
    else if (tok[n] == "8B") attr.dtype.vx_type = VSI_NN_TYPE_BOOL8;
    else if (tok[n] == "16U") attr.dtype.vx_type = VSI_NN_TYPE_UINT16;
    else if (tok[n] == "16S") attr.dtype.vx_type = VSI_NN_TYPE_INT16;
    else if (tok[n] == "16F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT16;
    else if (tok[n] == "16B") attr.dtype.vx_type = VSI_NN_TYPE_BFLOAT16;
    else if (tok[n] == "32U") attr.dtype.vx_type = VSI_NN_TYPE_UINT32;
    else if (tok[n] == "32S") attr.dtype.vx_type = VSI_NN_TYPE_INT32;
    else if (tok[n] == "32F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT32;
    else if (tok[n] == "64U") attr.dtype.vx_type = VSI_NN_TYPE_UINT64;
    else if (tok[n] == "64S") attr.dtype.vx_type = VSI_NN_TYPE_INT64;
    else if (tok[n] == "64F") attr.dtype.vx_type = VSI_NN_TYPE_FLOAT64;
    else throw std::range_error("parseTensorSpecs: Invalid tensor type [" + tok[n] + "] in " + spec);
    ++n;

    // Decode the dims; VSI stores them in reverse order:
    std::vector<size_t> dims = jevois::dnn::strshape(tok[n]);
    attr.dim_num = dims.size();
    for (size_t i = 0; i < attr.dim_num; ++i) attr.size[attr.dim_num - 1 - i] = dims[i];
    ++n;

    // Decode the optional quantization spec and its parameters:
    if (n == tok.size() || tok[n] == "NONE")
    {
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_NONE;
    }
    else if (tok[n] == "DFP")
    {
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_DFP;
      if (tok.size() != n+2)
        throw std::range_error("parseTensorSpecs: In "+spec+", DFP quantization needs :fl param (" + specdef + ')');
      attr.dtype.fl = std::stoi(tok[n+1]);
    }
    else if (tok[n] == "AA")
    {
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_AFFINE_ASYMMETRIC;
      if (tok.size() != n+3)
        throw std::range_error("parseTensorSpecs: In "+spec+", AA quantization needs :scale:zero params ("+specdef+')');
      attr.dtype.scale = std::stof(tok[n+1]);
      attr.dtype.zero_point = std::stoi(tok[n+2]);
    }
    else if (tok[n] == "APS")
    {
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_AFFINE_PERCHANNEL_SYMMETRIC;
      throw std::range_error("parseTensorSpecs: In " + spec + ", AFFINE_PERCHANNEL_SYMMETRIC quant not yet supported");
    }
    else throw std::range_error("parseTensorSpecs: Invalid quantization type in " + spec);

    // Done with this tensor spec:
    ret.emplace_back(attr);
  }
  return ret;
}
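
// Usage sketch (illustrative spec string): parse two comma-separated tensor specs, a
// quantized NHWC input and a float NCHW output:
//
//   auto attrs = jevois::dnn::parseTensorSpecs("NHWC:8U:1x224x224x3:AA:0.0078125:128, NCHW:32F:1x1000");
//   // attrs[0]: uint8, affine-asymmetric quantization (scale 0.0078125, zero point 128)
//   // attrs[1]: float32, no quantization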

cv::Size jevois::dnn::attrsize(vsi_nn_tensor_attr_t const & attr)
{
  switch (attr.dtype.fmt)
  {
    case VSI_NN_DIM_FMT_NHWC:
      if (attr.dim_num < 3) throw std::range_error("attrsize: need at least 3D, got " + jevois::dnn::attrstr(attr));
      return cv::Size(attr.size[1], attr.size[2]);

    case VSI_NN_DIM_FMT_NCHW:
    default:
      if (attr.dim_num < 2) throw std::range_error("attrsize: need at least 2D, got " + jevois::dnn::attrstr(attr));
      return cv::Size(attr.size[0], attr.size[1]);
  }
}

std::string jevois::dnn::attrstr(vsi_nn_tensor_attr_t const & attr)
{
  std::string ret;

  // Dimension ordering, if known:
  switch (attr.dtype.fmt)
  {
    case VSI_NN_DIM_FMT_NCHW: ret += "NCHW:"; break;
    case VSI_NN_DIM_FMT_NHWC: ret += "NHWC:"; break;
    default: break;
  }

  // Tensor element type:
  switch (attr.dtype.vx_type)
  {
    case VSI_NN_TYPE_UINT8: ret += "8U:"; break;
    case VSI_NN_TYPE_INT8: ret += "8S:"; break;
    case VSI_NN_TYPE_BOOL8: ret += "8B:"; break;
    case VSI_NN_TYPE_UINT16: ret += "16U:"; break;
    case VSI_NN_TYPE_INT16: ret += "16S:"; break;
    case VSI_NN_TYPE_FLOAT16: ret += "16F:"; break;
    case VSI_NN_TYPE_BFLOAT16: ret += "16B:"; break;
    case VSI_NN_TYPE_UINT32: ret += "32U:"; break;
    case VSI_NN_TYPE_INT32: ret += "32S:"; break;
    case VSI_NN_TYPE_FLOAT32: ret += "32F:"; break;
    case VSI_NN_TYPE_UINT64: ret += "64U:"; break;
    case VSI_NN_TYPE_INT64: ret += "64S:"; break;
    case VSI_NN_TYPE_FLOAT64: ret += "64F:"; break;
    default: throw std::range_error("attrstr: Unsupported tensor type " + std::to_string(attr.dtype.vx_type));
  }

  // Dims, in reverse of VSI storage order:
  for (uint32_t i = 0; i < attr.dim_num; ++i)
    ret += std::to_string(attr.size[attr.dim_num - 1 - i]) + ((i < attr.dim_num-1) ? 'x' : ':');

  // Quantization spec:
  switch (attr.dtype.qnt_type)
  {
    case VSI_NN_QNT_TYPE_NONE: ret += ":NONE"; break;
    case VSI_NN_QNT_TYPE_DFP: ret += ":DFP:" + std::to_string(attr.dtype.fl); break;
    case VSI_NN_QNT_TYPE_AFFINE_ASYMMETRIC:
      ret += ":AA:" + std::to_string(attr.dtype.scale) + ':' + std::to_string(attr.dtype.zero_point); break;
    case VSI_NN_QNT_TYPE_AFFINE_PERCHANNEL_SYMMETRIC: ret += ":APS:unsupported"; break;
    default: throw std::range_error("attrstr: Unsupported tensor quantization " + std::to_string(attr.dtype.qnt_type));
  }
  return ret;
}

vsi_nn_tensor_attr_t jevois::dnn::tensorattr(TfLiteTensor const * t)
{
  vsi_nn_tensor_attr_t attr; memset(&attr, 0, sizeof(attr));
  attr.dtype.fmt = VSI_NN_DIM_FMT_AUTO;
  attr.dtype.vx_type = tf2vsi(t->type);

  switch (t->quantization.type)
  {
    case kTfLiteNoQuantization:
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_NONE;
      break;

    case kTfLiteAffineQuantization:
      attr.dtype.qnt_type = VSI_NN_QNT_TYPE_DFP;
      // (decoding of the affine scale/zero-point parameters elided in this excerpt)
      break;

    default: LFATAL("unsupported quantization " << t->quantization.type);
  }

  // Copy the dims, in reverse order as expected by VSI:
  TfLiteIntArray const & dims = *t->dims;
  attr.dim_num = dims.size;
  for (int i = 0; i < dims.size; ++i) attr.size[dims.size - 1 - i] = dims.data[i];
  return attr;
}

void jevois::dnn::softmax(float const * input, size_t n, float fac, float * output)
{
  // Softmax with temperature fac, shifted by the max input value for numerical stability:
  float sum = 0.0F;
  float largest = -FLT_MAX;
  for (size_t i = 0; i < n; ++i) if (input[i] > largest) largest = input[i];

  for (size_t i = 0; i < n; ++i)
  {
    float e = exp(input[i]/fac - largest/fac);
    sum += e;
    output[i] = e;
  }
  if (sum) for (size_t i = 0; i < n; ++i) output[i] /= sum;
}
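
// Usage sketch (illustrative values): convert three raw logits into probabilities with
// temperature 1.0; the outputs sum to 1:
//
//   float logits[3] = { 1.0F, 2.0F, 3.0F }, probs[3];
//   jevois::dnn::softmax(logits, 3, 1.0F, probs);  // probs ~ { 0.090, 0.245, 0.665 }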