JeVoisBase  1.8
JeVois Smart Embedded Machine Vision Toolkit Base Modules
Share this page:
TensorFlowSingle Class Reference

Identify objects using TensorFlow deep neural network. More...

Inheritance diagram for TensorFlowSingle:
Collaboration diagram for TensorFlowSingle:

Public Member Functions

 TensorFlowSingle (std::string const &instance)
 Constructor. More...
 
virtual ~TensorFlowSingle ()
 Virtual destructor for safe inheritance. More...
 
virtual void postUninit () override
 Un-initialization. More...
 
void sendAllSerial ()
 Send serial messages. More...
 
virtual void process (jevois::InputFrame &&inframe) override
 Processing function, no video output. More...
 
virtual void process (jevois::InputFrame &&inframe, jevois::OutputFrame &&outframe) override
 Processing function with video output to USB. More...
 
- Public Member Functions inherited from jevois::Module
 Module (std::string const &instance)
 
virtual ~Module ()
 
virtual void sendSerial (std::string const &str)
 
virtual void parseSerial (std::string const &str, std::shared_ptr< UserInterface > s)
 
virtual void supportedCommands (std::ostream &os)
 
size_t frameNum () const
 
- Public Member Functions inherited from jevois::Component
 Component (std::string const &instance)
 
virtual ~Component ()
 
std::shared_ptr< Component > getSubComponent (std::string const &instance) const
 
std::shared_ptr< Comp > addSubComponent (std::string const &instance, Args &&...args)
 
void removeSubComponent (std::shared_ptr< Comp > &component)
 
void removeSubComponent (std::string const &instance, bool warnIfNotFound=true)
 
std::shared_ptr< Comp > getSubComponent (std::string const &instance) const
 
bool isTopLevel () const
 
bool initialized () const
 
std::string const & className () const
 
std::string const & instanceName () const
 
std::vector< std::string > setParamVal (std::string const &paramdescriptor, T const &val)
 
void setParamValUnique (std::string const &paramdescriptor, T const &val)
 
std::vector< std::pair< std::string, T > > getParamVal (std::string const &paramdescriptor) const
 
T getParamValUnique (std::string const &paramdescriptor) const
 
std::vector< std::string > setParamString (std::string const &paramdescriptor, std::string const &val)
 
void setParamStringUnique (std::string const &paramdescriptor, std::string const &val)
 
std::vector< std::pair< std::string, std::string > > getParamString (std::string const &paramdescriptor) const
 
std::string getParamStringUnique (std::string const &paramdescriptor) const
 
void freezeParam (std::string const &paramdescriptor)
 
void unFreezeParam (std::string const &paramdescriptor)
 
void freezeAllParams ()
 
void unFreezeAllParams ()
 
std::string descriptor () const
 
void setParamsFromFile (std::string const &filename)
 
std::istream & setParamsFromStream (std::istream &is, std::string const &absfile)
 
virtual void paramInfo (std::shared_ptr< UserInterface > s, std::map< std::string, std::string > &categs, bool skipFrozen, std::string const &cname="", std::string const &pfx="")
 
void setPath (std::string const &path)
 
std::string absolutePath (std::string const &path="")
 
std::shared_ptr< Comp > addSubComponent (std::string const &instance, Args &&...args)
 
void removeSubComponent (std::shared_ptr< Comp > &component)
 
void removeSubComponent (std::string const &instance, bool warnIfNotFound=true)
 
std::shared_ptr< Comp > getSubComponent (std::string const &instance) const
 
bool isTopLevel () const
 
bool initialized () const
 
std::string const & className () const
 
std::string const & instanceName () const
 
std::vector< std::string > setParamVal (std::string const &paramdescriptor, T const &val)
 
void setParamValUnique (std::string const &paramdescriptor, T const &val)
 
std::vector< std::pair< std::string, T > > getParamVal (std::string const &paramdescriptor) const
 
T getParamValUnique (std::string const &paramdescriptor) const
 
std::vector< std::string > setParamString (std::string const &paramdescriptor, std::string const &val)
 
void setParamStringUnique (std::string const &paramdescriptor, std::string const &val)
 
std::vector< std::pair< std::string, std::string > > getParamString (std::string const &paramdescriptor) const
 
std::string getParamStringUnique (std::string const &paramdescriptor) const
 
void freezeParam (std::string const &paramdescriptor)
 
void unFreezeParam (std::string const &paramdescriptor)
 
void freezeAllParams ()
 
void unFreezeAllParams ()
 
std::string descriptor () const
 
void setParamsFromFile (std::string const &filename)
 
std::istream & setParamsFromStream (std::istream &is, std::string const &absfile)
 
virtual void paramInfo (std::shared_ptr< UserInterface > s, std::map< std::string, std::string > &categs, bool skipFrozen, std::string const &cname="", std::string const &pfx="")
 
void setPath (std::string const &path)
 
std::string absolutePath (std::string const &path="")
 
- Public Member Functions inherited from jevois::ParameterRegistry
virtual ~ParameterRegistry ()
 

Protected Attributes

std::shared_ptr< TensorFlow > itsTensorFlow
 
std::vector< TensorFlow::predresult > itsResults
 
std::future< float > itsPredictFut
 
cv::Mat itsRawInputCv
 
cv::Mat itsCvImg
 
cv::Mat itsRawPrevOutputCv
 
unsigned long itsFrame
 

Additional Inherited Members

- Protected Member Functions inherited from jevois::Component
virtual void preInit ()
 
virtual void postInit ()
 
virtual void preUninit ()
 
virtual void preInit ()
 
virtual void postInit ()
 
virtual void preUninit ()
 
- Protected Member Functions inherited from jevois::ParameterRegistry
void addParameter (ParameterBase *const param)
 
void removeParameter (ParameterBase *const param)
 
void callbackInitCall ()
 

Detailed Description

Identify objects using TensorFlow deep neural network.

TensorFlow is a popular neural network framework. This module identifies the object in a square region in the center of the camera field of view using a deep convolutional neural network.

The deep network analyzes the image by filtering it using many different filter kernels, and several stacked passes (network layers). This essentially amounts to detecting the presence of both simple and complex parts of known objects in the image (e.g., from detecting edges in lower layers of the network to detecting car wheels or even whole cars in higher layers). The last layer of the network is reduced to a vector with one entry per known kind of object (object class). This module returns the class names of the top scoring candidates in the output vector, if any have scored above a minimum confidence threshold. When nothing is recognized with sufficiently high confidence, there is no output.

This module runs a TensorFlow network and shows the top-scoring results. Larger deep networks can be a bit slow, hence the network prediction is only run once in a while. Point your camera towards some interesting object, make the object fit in the picture shown at right (which will be fed to the neural network), keep it stable, and wait for TensorFlow to tell you what it found. The framerate figures shown at the bottom left of the display reflect the speed at which each new video frame from the camera is processed, but in this module this just amounts to converting the image to RGB, sending it to the neural network for processing in a separate thread, and creating the demo display. Actual network inference speed (time taken to compute the predictions on one image) is shown at the bottom right. See below for how to trade-off speed and accuracy.

Note that by default this module runs different flavors of MobileNets trained on the ImageNet dataset. There are 1000 different kinds of objects (object classes) that these networks can recognize (too long to list here). The input layer of these networks is 299x299, 224x224, 192x192, 160x160, or 128x128 pixels by default, depending on the network used. This module takes a crop at the center of the video image, with size determined by the USB video size: the crop size is USB output width - 16 - camera sensor image width (see the worked example in the next section). With the default network parameters, this module hence requires at least 320x240 camera sensor resolution. The networks provided on the JeVois microSD image have been trained on large clusters of GPUs, using 1.2 million training images from the ImageNet dataset.

For more information about MobileNets, see https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md

For more information about the ImageNet dataset used for training, see http://www.image-net.org/challenges/LSVRC/2012/

Sometimes this module will make mistakes! The performance of mobilenets is about 40% to 70% correct (mean average precision) on the test set, depending on network size (bigger networks are more accurate but slower).

Neural network size and speed

When using a video mapping with USB output, the cropped window sent to the network is automatically sized to a square size that is the difference between the USB output video width and the camera sensor input width minus 16 pixels (e.g., when USB video mode is 560x240 and camera sensor mode is 320x240, the network will be resized to 224x224 since 224=560-16-320).

The network actual input size varies depending on which network is used; for example, mobilenet_v1_0.25_128_quant expects 128x128 input images, while mobilenet_v1_1.0_224 expects 224x224. We automatically rescale the cropped window to the network's desired input size. Note that there is a cost to rescaling, so, for best performance, you should match the USB output width to be the camera sensor width + 16 + network input width (e.g., 320 + 16 + 224 = 560, matching the example above).

For example:

  • with USB output 464x240 (crop size 128x128), mobilenet_v1_0.25_128_quant (network size 128x128), runs at about 12ms/prediction (83.3 frames/s).
  • with USB output 464x240 (crop size 128x128), mobilenet_v1_0.5_128_quant (network size 128x128), runs at about 26ms/prediction (38.5 frames/s).
  • with USB output 560x240 (crop size 224x224), mobilenet_v1_0.25_224_quant (network size 224x224), runs at about 35ms/prediction (28.5 frames/s).
  • with USB output 560x240 (crop size 224x224), mobilenet_v1_1.0_224_quant (network size 224x224), runs at about 185ms/prediction (5.4 frames/s).

When using a videomapping with no USB output, the image crop is directly taken to match the network input size, so that no resizing occurs.

Note that network dims must always be such that they fit inside the camera input image.

To easily select one of the available networks, see JEVOIS:/modules/JeVois/TensorFlowSingle/params.cfg on the microSD card of your JeVois camera.

Serial messages

  • On every frame where detection results were obtained, this module sends a message
      TFF framenum
    where framenum is the frame number (starts at 0).
  • In addition, when detections are found, up to "top" messages will be sent (where "top" is a module parameter setting the maximum number of reported candidates), for those category candidates that have scored above the "thresh" parameter:
      TFR category score
    where category is the category name (from labels.txt in the network's data directory) and score is the confidence score from 0.0 to 100.0

Using your own network

For a step-by-step tutorial, see Training custom TensorFlow networks for JeVois.

This module supports RGB or grayscale inputs, byte or float32. You should create and train your network using fast GPUs, and then follow the instruction here to convert your trained network to TFLite format:

https://www.tensorflow.org/mobile/tflite/

Then you just need to create a directory under JEVOIS:/share/tensorflow/ with the name of your network, and, in there, two files, labels.txt with the category labels, and model.tflite with your model converted to TensorFlow Lite (flatbuffer format). Finally, edit JEVOIS:/modules/JeVois/TensorFlowSingle/params.cfg to select your new network when the module is launched.

Author
Laurent Itti
Display Name:
TensorFlow Single
Videomapping:
NONE 0 0 0.0 YUYV 320 240 30.0 JeVois TensorFlowSingle
Videomapping:
YUYV 560 240 15.0 YUYV 320 240 15.0 JeVois TensorFlowSingle
Videomapping:
YUYV 464 240 15.0 YUYV 320 240 15.0 JeVois TensorFlowSingle
Videomapping:
YUYV 880 480 15.0 YUYV 640 480 15.0 JeVois TensorFlowSingle
Email:
itti@usc.edu
Address:
University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
Main URL:
http://jevois.org
Support URL:
http://jevois.org/doc
Other URL:
http://iLab.usc.edu
License:
GPL v3
Distribution:
Unrestricted
Restrictions:
None

Definition at line 152 of file TensorFlowSingle.C.

Constructor & Destructor Documentation

◆ TensorFlowSingle()

TensorFlowSingle::TensorFlowSingle ( std::string const &  instance)
inline

Constructor.

Definition at line 158 of file TensorFlowSingle.C.

References itsTensorFlow.

◆ ~TensorFlowSingle()

virtual TensorFlowSingle::~TensorFlowSingle ( )
inlinevirtual

Virtual destructor for safe inheritance.

Definition at line 166 of file TensorFlowSingle.C.

Member Function Documentation

◆ postUninit()

virtual void TensorFlowSingle::postUninit ( )
inlineoverridevirtual

Un-initialization.

Reimplemented from jevois::Component.

Definition at line 172 of file TensorFlowSingle.C.

References itsPredictFut.

◆ process() [1/2]

virtual void TensorFlowSingle::process ( jevois::InputFrame &&  inframe)
inlineoverridevirtual

Processing function, no video output.

Reimplemented from jevois::Module.

Definition at line 189 of file TensorFlowSingle.C.

References jevois::rawimage::cvImage(), jevois::RawImage::height, itsCvImg, itsFrame, itsResults, itsTensorFlow, LFATAL, LINFO, sendAllSerial(), and jevois::RawImage::width.

◆ process() [2/2]

◆ sendAllSerial()

void TensorFlowSingle::sendAllSerial ( )
inline

Send serial messages.

Definition at line 180 of file TensorFlowSingle.C.

References itsFrame, itsResults, jevois::Module::sendSerial(), jevois::sformat(), and to_string().

Referenced by process().

Member Data Documentation

◆ itsCvImg

cv::Mat TensorFlowSingle::itsCvImg
protected

Definition at line 380 of file TensorFlowSingle.C.

Referenced by process().

◆ itsFrame

unsigned long TensorFlowSingle::itsFrame
protected

Definition at line 382 of file TensorFlowSingle.C.

Referenced by process(), and sendAllSerial().

◆ itsPredictFut

std::future<float> TensorFlowSingle::itsPredictFut
protected

Definition at line 378 of file TensorFlowSingle.C.

Referenced by postUninit(), and process().

◆ itsRawInputCv

cv::Mat TensorFlowSingle::itsRawInputCv
protected

Definition at line 379 of file TensorFlowSingle.C.

Referenced by process().

◆ itsRawPrevOutputCv

cv::Mat TensorFlowSingle::itsRawPrevOutputCv
protected

Definition at line 381 of file TensorFlowSingle.C.

Referenced by process().

◆ itsResults

std::vector<TensorFlow::predresult> TensorFlowSingle::itsResults
protected

Definition at line 377 of file TensorFlowSingle.C.

Referenced by process(), and sendAllSerial().

◆ itsTensorFlow

std::shared_ptr<TensorFlow> TensorFlowSingle::itsTensorFlow
protected

Definition at line 376 of file TensorFlowSingle.C.

Referenced by process(), and TensorFlowSingle().


The documentation for this class was generated from the following file: