import argparse

import numpy as np
import cv2 as cv

from yunet import YuNet
def str2bool(v):
    """Parse a command-line flag string into a bool.

    Accepts common truthy spellings ('on', 'yes', 'true', 'y', 't') and
    falsy spellings ('off', 'no', 'false', 'n', 'f'), case-insensitively.

    Raises:
        NotImplementedError: if *v* matches neither set, so typos on the
            command line fail loudly instead of being silently coerced.
    """
    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
        return True
    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
        return False
    else:
        raise NotImplementedError
# Computation backends/targets offered on the command line.  CUDA entries are
# always listed; whether they actually work depends on the local OpenCV build.
backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
help_msg_targets = "Chose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"

# TIM-VX / NPU support only exists in special OpenCV builds; probe for the
# attributes and fall back gracefully when they are absent.
try:
    backends += [cv.dnn.DNN_BACKEND_TIMVX]
    targets += [cv.dnn.DNN_TARGET_NPU]
    help_msg_backends += "; {:d}: TIMVX"
    help_msg_targets += "; {:d}: NPU"
except AttributeError:
    print(
        'This version of OpenCV does not support TIM-VX and NPU. Visit https://gist.github.com/fengyuentau/5a7a5ba36328f2b763aea026c43fa45f for more information.')
# Command-line interface for the demo.
parser = argparse.ArgumentParser(
    description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
parser.add_argument('--input', '-i', type=str,
                    help='Path to the input image. Omit for using default camera.')
parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2022mar.onnx',
                    help='Path to the model.')
parser.add_argument('--backend', '-b', type=int, default=backends[0],
                    help=help_msg_backends.format(*backends))
parser.add_argument('--target', '-t', type=int, default=targets[0],
                    help=help_msg_targets.format(*targets))
parser.add_argument('--conf_threshold', type=float, default=0.9,
                    help='Filter out faces of confidence < conf_threshold.')
parser.add_argument('--nms_threshold', type=float, default=0.3,
                    help='Suppress bounding boxes of iou >= nms_threshold.')
parser.add_argument('--top_k', type=int, default=5000,
                    help='Keep top_k bounding boxes before NMS.')
# NOTE: was `type=str`, which made any non-empty string (even "false") truthy;
# str2bool keeps it consistent with --vis and parses off/no/false correctly.
parser.add_argument('--save', '-s', type=str2bool, default=False,
                    help='Set true to save results. This flag is invalid when using camera.')
parser.add_argument('--vis', '-v', type=str2bool, default=True,
                    help='Set true to open a window for result visualization. This flag is invalid when using camera.')
args = parser.parse_args()
def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
    """Draw detection results onto a copy of *image* and return it.

    Args:
        image: input BGR image (np.ndarray).
        results: detections from YuNet; each row is assumed to be
            [x, y, w, h, 10 landmark coords, score] -- 15 values total,
            matching the det[0:4] / det[4:14] slicing below. May be None.
        box_color: BGR color for face bounding boxes.
        text_color: BGR color for FPS and confidence text.
        fps: optional frames-per-second value to draw at the top-left.

    Returns:
        A new image with boxes, confidences and landmarks drawn; the
        input image is not modified.
    """
    output = image.copy()

    # One distinct color per landmark: eyes, nose tip, mouth corners.
    landmark_color = [
        (255, 0, 0),    # right eye
        (0, 0, 255),    # left eye
        (0, 255, 0),    # nose tip
        (255, 0, 255),  # right mouth corner
        (0, 255, 255),  # left mouth corner
    ]

    if fps is not None:
        cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)

    # Tolerate results=None (e.g. no detections from the camera loop).
    for det in (results if results is not None else []):
        bbox = det[0:4].astype(np.int32)
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), box_color, 2)

        conf = det[-1]  # detection score is the last element of each row
        cv.putText(output, '{:.4f}'.format(conf), (bbox[0], bbox[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, text_color)

        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, landmark, 2, landmark_color[idx], 2)

    return output
if __name__ == '__main__':
    # Instantiate YuNet with the CLI-selected model, thresholds, backend and
    # target.  inputSize is a placeholder; it is reset to the real frame size
    # via setInputSize() before every inference below.
    model = YuNet(modelPath=args.model,
                  inputSize=[320, 320],
                  confThreshold=args.conf_threshold,
                  nmsThreshold=args.nms_threshold,
                  topK=args.top_k,
                  backendId=args.backend,
                  targetId=args.target)

    if args.input is not None:
        # --- Image mode: single inference on the given file ---
        image = cv.imread(args.input)
        h, w, _ = image.shape

        model.setInputSize([w, h])
        results = model.infer(image)

        # Print one line per detection: index, then the 14 geometry values
        # (bbox + 10 landmark coords); the trailing score is omitted.
        print('{} faces detected.'.format(results.shape[0]))
        for idx, det in enumerate(results):
            print('{}: {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}'.format(
                idx, *det[:-1]))

        # Draw results on the input image.
        image = visualize(image, results)

        if args.save:
            # Fixed typo: was 'Resutls'.
            print('Results saved to result.jpg\n')
            cv.imwrite('result.jpg', image)

        if args.vis:
            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
            cv.imshow(args.input, image)
            cv.waitKey(0)
    else:
        # --- Camera mode: stream from the default device until keypress ---
        deviceId = 0
        cap = cv.VideoCapture(deviceId)
        w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
        model.setInputSize([w, h])

        tm = cv.TickMeter()
        while cv.waitKey(1) < 0:
            hasFrame, frame = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break

            # Time only the inference so the FPS overlay reflects the model.
            tm.start()
            results = model.infer(frame)
            tm.stop()

            frame = visualize(frame, results, fps=tm.getFPS())
            cv.imshow('YuNet Demo', frame)

            tm.reset()