#include <opencv2/imgproc/imgproc.hpp>

#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/string_util.h>

// Standard headers used by the excerpts below:
#include <fstream>
#include <queue>
#include <cstring>
#include <sys/time.h>
// From JeVoisReporter::Report(char const * format, va_list args):
char buf[1024];
int ret = vsnprintf(buf, 1024, format, args);
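// The vsnprintf() call above formats TensorFlow Lite's error messages so they
// can be routed into the JeVois log. A minimal self-contained sketch of the
// same idea (StderrReporter is a hypothetical name, and the error_reporter.h
// header path varies across TFLite versions):
#include <tensorflow/lite/core/api/error_reporter.h>
#include <cstdarg>
#include <cstdio>

class StderrReporter : public tflite::ErrorReporter
{
public:
  int Report(char const * format, va_list args) override
  {
    char buf[1024];
    int const ret = vsnprintf(buf, sizeof(buf), format, args);
    std::fprintf(stderr, "TFLite: %s\n", buf); // route to your own logger instead
    return ret;
  }
};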
// From readLabelsFile(fname):
std::ifstream file(fname);
if (!file) LFATAL("Could not open labels file " << fname);

std::string line;
while (std::getline(file, line)) labels.push_back(line);

// Pad with empty labels so the count is a multiple of 16, which some models expect:
int const padding = 16;
while (labels.size() % padding) labels.emplace_back();
// From get_top_n(prediction, prediction_size, top_results, input_floating):
int const topn = top::get();
float const th = thresh::get() * 0.01F;

// Min-heap that keeps the topn highest (score, index) pairs seen so far:
std::priority_queue<std::pair<float, int>, std::vector<std::pair<float, int>>,
                    std::greater<std::pair<float, int>>> top_result_pq;

float const scale = scorescale::get() * (input_floating ? 1.0F : 1.0F / 255.0F);

for (int i = 0; i < prediction_size; ++i)
{
  float value = prediction[i] * scale;

  // Only consider scores that beat the threshold:
  if (value < th) continue;

  top_result_pq.push(std::pair<float, int>(value, i));

  // If over capacity, kick out the smallest score:
  if (int(top_result_pq.size()) > topn) top_result_pq.pop();
}

// Drain the heap (smallest first), then reverse into descending order:
while (!top_result_pq.empty())
{
  auto const & tr = top_result_pq.top();
  top_results.push_back({ tr.first * 100, std::string(labels[tr.second]) });
  top_result_pq.pop();
}
std::reverse(top_results.begin(), top_results.end());
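// A self-contained sketch of the same top-N idiom (not from the original
// file), compilable on its own:
#include <algorithm>
#include <cstdio>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

int main()
{
  std::vector<float> const scores { 0.1F, 0.7F, 0.3F, 0.9F, 0.2F };
  int const topn = 3;

  // Min-heap: top() is always the smallest of the retained scores:
  std::priority_queue<std::pair<float, int>, std::vector<std::pair<float, int>>,
                      std::greater<std::pair<float, int>>> pq;
  for (int i = 0; i < int(scores.size()); ++i)
  {
    pq.push({ scores[i], i });
    if (int(pq.size()) > topn) pq.pop(); // evict the current minimum
  }

  // Drain ascending, then reverse to get descending order:
  std::vector<std::pair<float, int>> top;
  while (!pq.empty()) { top.push_back(pq.top()); pq.pop(); }
  std::reverse(top.begin(), top.end());

  for (auto const & p : top) std::printf("index %d: score %.2f\n", p.second, p.first);
  return 0;
}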
// From the network (re)load logic: reject a reload while an asynchronous load
// is still in flight:
if (itsReadyFut.wait_for(std::chrono::milliseconds(5)) == std::future_status::ready)
  itsReadyFut.get(); // previous load finished; harvest it (rethrows on failure)
else
  throw std::logic_error("Loading already in progress. Attempt to load again rejected");
LINFO("Closing model..");
std::string root = dataroot::get(); if (root.empty() == false) root += '/';
std::string const modelfile = absolutePath(root + netdir::get() + "/model.tflite");
std::string const labelfile = absolutePath(root + netdir::get() + "/labels.txt");

LINFO("Using model from " << modelfile);
LINFO("Using labels from " << labelfile);
// Memory-map the flatbuffer model file:
model = tflite::FlatBufferModel::BuildFromFile(modelfile.c_str(), &itsErrorReporter);
if (!model) LFATAL("Failed to mmap model " << modelfile);
LINFO("Loaded model " << modelfile);
// Build the interpreter with all builtin ops available:
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
int t_size = interpreter->tensors_size();
for (int i = 0; i < t_size; ++i)
{ /* ... per-tensor debug logging ... */ }
if (threads::get()) interpreter->SetNumThreads(threads::get());

if (interpreter->AllocateTensors() != kTfLiteOk) LFATAL("Failed to allocate tensors");
LINFO("TensorFlow network ready");
// From predict(cvimg, results):
if (itsReady.load() == false) throw std::logic_error("not ready yet...");

int const image_width = cvimg.cols;
int const image_height = cvimg.rows;
int const image_type = cvimg.type();
// The input tensor is NHWC: dims->data = { batch, height, width, channels }:
int const input = interpreter->inputs()[0];
TfLiteIntArray * dims = interpreter->tensor(input)->dims;
int const wanted_height = dims->data[1];
int const wanted_width = dims->data[2];
int const wanted_channels = dims->data[3];
if (wanted_channels != 1 && wanted_channels != 3)
  LFATAL("Network wants " << wanted_channels << " input channels, but only 1 or 3 are supported");
if (wanted_channels == 3 && image_type != CV_8UC3) LFATAL("Network wants RGB but input image is not CV_8UC3");
if (wanted_channels == 1 && image_type != CV_8UC1) LFATAL("Network wants Gray but input image is not CV_8UC1");
if (image_width != wanted_width || image_height != wanted_height)
  LFATAL("Wrong input size " << image_width << 'x' << image_height << " but network wants "
         << wanted_width << 'x' << wanted_height);
// Fill the network's input tensor, depending on its element type:
switch (interpreter->tensor(input)->type)
{
case kTfLiteUInt8: // quantized input: copy the raw 8-bit pixels straight in
  memcpy(interpreter->typed_tensor<uint8_t>(input), cvimg.data, cvimg.total() * cvimg.elemSize());
  break;

case kTfLiteFloat32: // float input: convert and map [0..255] to [-1..1]
{
  cv::Mat convimg;
  if (wanted_channels == 1) cvimg.convertTo(convimg, CV_32FC1, 1.0F / 127.5F, -1.0F);
  else cvimg.convertTo(convimg, CV_32FC3, 1.0F / 127.5F, -1.0F);
  memcpy(interpreter->typed_tensor<float>(input), convimg.data, convimg.total() * convimg.elemSize());
  break;
}

default:
  LFATAL("only uint8 or float32 network input pixel types are supported");
}
// Run inference and time it (in milliseconds):
struct timeval start, stop;
gettimeofday(&start, 0);
if (interpreter->Invoke() != kTfLiteOk) LFATAL("Failed to invoke");
gettimeofday(&stop, 0);
float predtime = (stop.tv_sec * 1000 + stop.tv_usec / 1000) - (start.tv_sec * 1000 + start.tv_usec / 1000);
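// An equivalent measurement with std::chrono::steady_clock (a suggested
// alternative, not the original code; steady_clock is monotonic, whereas
// gettimeofday() can jump when the wall clock is adjusted):
#include <chrono>

template <class F>
float timeMs(F && f)
{
  auto const t0 = std::chrono::steady_clock::now();
  f();                                              // e.g. [&]{ interpreter->Invoke(); }
  auto const t1 = std::chrono::steady_clock::now();
  return std::chrono::duration<float, std::milli>(t1 - t0).count();
}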
const size_t num_results = 5;
const float threshold = 0.001F;

std::vector<std::pair<float, int>> top_results;
int const output = interpreter->outputs()[0];
switch (interpreter->tensor(output)->type)
{
  // ... cases for the supported output types elided ...
  default:
    LFATAL("cannot handle output type " << interpreter->tensor(output)->type << " yet");
}
// From getInDims(w, h, c):
if (itsReady.load() == false) throw std::logic_error("not ready yet...");
int const input = interpreter->inputs()[0];
TfLiteIntArray * dims = interpreter->tensor(input)->dims;
h = dims->data[1]; w = dims->data[2]; c = dims->data[3]; // NHWC
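// A hypothetical caller sketch (runOnce and how the frame is filled are
// assumptions, not from the original file):
#include <opencv2/core/core.hpp>
#include <vector>

void runOnce(TensorFlow & tf)
{
  int w, h, c;
  tf.getInDims(w, h, c);                          // e.g. 224 x 224 x 3
  cv::Mat blob(h, w, c == 3 ? CV_8UC3 : CV_8UC1); // fill with a resized crop of the camera frame
  std::vector<jevois::ObjReco> results;
  float const ms = tf.predict(blob, results);     // returns the measured inference time (predtime above)
  (void)ms;
}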
// Declarations referenced above (from the class interface):

TensorFlow(std::string const & instance);      // Constructor
virtual ~TensorFlow();                         // Virtual destructor for safe inheritance
float predict(cv::Mat const & cvimg, std::vector<jevois::ObjReco> & results);
               // Processing function; results are stored internally in the
               // underlying TensorFlow network object
void getInDims(int & w, int & h, int & c);     // Get input width, height, channels
void postInit() override;                      // Initialize, configure and load the network in a thread
void postUninit() override;                    // Un-initialize and free resources
void onParamChange(tflow::netdir const & param, std::string const & newval) override;
void readLabelsFile(std::string const & fname);
template <class T>
void get_top_n(T * prediction, int prediction_size, std::vector<jevois::ObjReco> & top_results,
               bool input_floating);
int Report(char const * format, va_list args) override; // JeVoisReporter
JeVoisReporter itsErrorReporter;
std::unique_ptr<tflite::FlatBufferModel> model;
std::unique_ptr<tflite::Interpreter> interpreter;
std::vector<std::string> labels;
std::atomic<bool> itsReady;
std::atomic<bool> itsNeedReload;
std::future<void> itsReadyFut;
friend class Component;
std::filesystem::path absolutePath(std::filesystem::path const & path = "");
template <class Function, class... Args>
std::future<std::invoke_result_t<std::decay_t<Function>, std::decay_t<Args>...>>
async(Function && f, Args &&... args);
#define JEVOIS_WAIT_GET_FUTURE(f)