JeVoisBase 1.20
JeVois Smart Embedded Machine Vision Toolkit Base Modules
demo.py
import argparse

import numpy as np
import cv2 as cv

from mobilenet_v1 import MobileNetV1
from mobilenet_v2 import MobileNetV2

# Parse a yes/no command-line string into a bool.
def str2bool(v):
    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
        return True
    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
        return False
    else:
        raise NotImplementedError

backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
try:
    # These attributes only exist in OpenCV builds with TIM-VX support;
    # accessing them elsewhere raises AttributeError.
    backends += [cv.dnn.DNN_BACKEND_TIMVX]
    targets += [cv.dnn.DNN_TARGET_NPU]
    help_msg_backends += "; {:d}: TIMVX"
    help_msg_targets += "; {:d}: NPU"
except AttributeError:
    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://gist.github.com/fengyuentau/5a7a5ba36328f2b763aea026c43fa45f for more information.')

parser = argparse.ArgumentParser(description='Demo for MobileNet V1 & V2.')
parser.add_argument('--input', '-i', type=str, help='Path to the input image.')
parser.add_argument('--model', '-m', type=str, choices=['v1', 'v2', 'v1-q', 'v2-q'], default='v1', help='Which model to use: v1, v2, or their int8-quantized variants v1-q and v2-q.')
parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
parser.add_argument('--label', '-l', type=str, default='./imagenet_labels.txt', help='Path to the dataset labels.')
args = parser.parse_args()

if __name__ == '__main__':
    # Instantiate the selected MobileNet variant
    models = {
        'v1': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
        'v2': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
        'v1-q': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
        'v2-q': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target)
    }
    model = models[args.model]

    # Read the image, convert BGR to RGB, resize to 256x256, then take the 224x224 center crop
    image = cv.imread(args.input)
    image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    image = cv.resize(image, dsize=(256, 256))
    image = image[16:240, 16:240, :]

    # Inference
    result = model.infer(image)

    # Print result
    print('label: {}'.format(result))
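
For reference, here is a minimal sketch of driving the classifier directly, without the argument parser. It relies on stated assumptions: ./example.jpg is a hypothetical input path, the MobileNetV1 ONNX model and imagenet_labels.txt named above sit in the working directory, and MobileNetV1.infer() accepts the same RGB 224x224 center crop prepared in demo.py.

import cv2 as cv
from mobilenet_v1 import MobileNetV1

# Same model/label files as the 'v1' entry above; CPU backend/target as in the defaults.
net = MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr.onnx',
                  labelPath='./imagenet_labels.txt',
                  backendId=cv.dnn.DNN_BACKEND_OPENCV,
                  targetId=cv.dnn.DNN_TARGET_CPU)

image = cv.imread('./example.jpg')            # hypothetical input file
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)  # BGR -> RGB, as in demo.py
image = cv.resize(image, dsize=(256, 256))[16:240, 16:240, :]  # 224x224 center crop
print('label: {}'.format(net.infer(image)))

The equivalent command-line invocation of the script itself would be: python demo.py --input ./example.jpg --model v1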