JeVoisBase  1.22
JeVois Smart Embedded Machine Vision Toolkit Base Modules
demo.py
import argparse

import numpy as np
import cv2 as cv

from mobilenet_v1 import MobileNetV1
from mobilenet_v2 import MobileNetV2

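# Helper to parse boolean-like strings from the command line; unused in this
# demo but kept for optional on/off flags.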
def str2bool(v):
    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
        return True
    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
        return False
    else:
        # ValueError lets argparse report an invalid value cleanly.
        raise ValueError('Cannot interpret {} as a boolean.'.format(v))

backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
try:
    # The TIM-VX backend and NPU target only exist in custom OpenCV builds.
    backends += [cv.dnn.DNN_BACKEND_TIMVX]
    targets += [cv.dnn.DNN_TARGET_NPU]
    help_msg_backends += "; {:d}: TIMVX"
    help_msg_targets += "; {:d}: NPU"
except AttributeError:
    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://gist.github.com/fengyuentau/5a7a5ba36328f2b763aea026c43fa45f for more information.')

parser = argparse.ArgumentParser(description='Demo for MobileNet V1 & V2.')
parser.add_argument('--input', '-i', type=str, help='Path to the input image.')
parser.add_argument('--model', '-m', type=str, choices=['v1', 'v2', 'v1-q', 'v2-q'], default='v1', help='Which model to use: v1, v2, or their int8-quantized variants v1-q and v2-q.')
parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
parser.add_argument('--label', '-l', type=str, default='./imagenet_labels.txt', help='Path to the dataset labels.')
args = parser.parse_args()

if __name__ == '__main__':
    # Instantiate the selected MobileNet variant
    models = {
        'v1': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
        'v2': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
        'v1-q': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
        'v2-q': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target)
    }
    model = models[args.model]

    # Read the image, convert BGR to RGB, resize to 256x256, then center-crop
    # to 224x224 (offset (256 - 224) / 2 = 16 on each side), the usual
    # ImageNet evaluation preprocessing.
    image = cv.imread(args.input)
    if image is None:
        raise SystemExit('Could not read image: {}'.format(args.input))
    image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    image = cv.resize(image, dsize=(256, 256))
    image = image[16:240, 16:240, :]

    # Inference
    result = model.infer(image)

    # Print result
    print('label: {}'.format(result))
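# Example invocation (a sketch: the ONNX model files and imagenet_labels.txt
# are assumed to sit next to this script, and path/to/image.jpg is a
# placeholder for an image on disk):
#   python demo.py --input path/to/image.jpg --model v2
#   python demo.py -i path/to/image.jpg -m v1-q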