import argparse

import numpy as np
import cv2 as cv

from mp_palmdet import MPPalmDet
def str2bool(v):
    """Parse a command-line string into a bool.

    Accepts common truthy/falsy spellings, case-insensitively:
    'on'/'yes'/'true'/'y'/'t' -> True, 'off'/'no'/'false'/'n'/'f' -> False.

    Raises:
        NotImplementedError: for any unrecognized spelling.
    """
    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
        return True
    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
        return False
    else:
        raise NotImplementedError
# Computation backends/targets available in every OpenCV DNN build.
backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
try:
    # The TIM-VX / NPU enum members only exist in OpenCV builds compiled
    # with TIM-VX support; probing them raises AttributeError otherwise.
    backends += [cv.dnn.DNN_BACKEND_TIMVX]
    targets += [cv.dnn.DNN_TARGET_NPU]
    help_msg_backends += "; {:d}: TIMVX"
    help_msg_targets += "; {:d}: NPU"
except AttributeError:
    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://gist.github.com/fengyuentau/5a7a5ba36328f2b763aea026c43fa45f for more information.')
# Command-line interface. Defaults mirror the model release this demo ships
# with; backend/target choices are filled in from the lists probed above.
parser = argparse.ArgumentParser(description='Hand Detector from MediaPipe')
parser.add_argument('--input', '-i', type=str,
                    help='Path to the input image. Omit for using default camera.')
parser.add_argument('--model', '-m', type=str, default='./palm_detection_mediapipe_2022may.onnx',
                    help='Path to the model.')
parser.add_argument('--backend', '-b', type=int, default=backends[0],
                    help=help_msg_backends.format(*backends))
parser.add_argument('--target', '-t', type=int, default=targets[0],
                    help=help_msg_targets.format(*targets))
parser.add_argument('--score_threshold', type=float, default=0.99,
                    help='Filter out faces of confidence < conf_threshold. An empirical score threshold for the quantized model is 0.49.')
parser.add_argument('--nms_threshold', type=float, default=0.3,
                    help='Suppress bounding boxes of iou >= nms_threshold.')
# str2bool (not str): with type=str, '-s false' produced the truthy string
# 'false' and results were saved anyway. Matches the --vis flag's parsing.
parser.add_argument('--save', '-s', type=str2bool, default=False,
                    help='Set true to save results. This flag is invalid when using camera.')
parser.add_argument('--vis', '-v', type=str2bool, default=True,
                    help='Set true to open a window for result visualization. This flag is invalid when using camera.')
args = parser.parse_args()
def visualize(image, score, palm_box, palm_landmarks, fps=None):
    """Draw the palm detection result onto a copy of `image`.

    Args:
        image: input image (np.ndarray) the detection was run on.
        score: detection confidence, drawn next to the box.
        palm_box: [x1, y1, x2, y2] palm bounding box (castable to int32).
        palm_landmarks: array of (x, y) landmark points (castable to int32).
        fps: optional frames-per-second value; drawn top-left when given.

    Returns:
        A new image with box, score and landmarks drawn; `image` itself
        is left unmodified.
    """
    output = image.copy()

    if fps is not None:
        cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

    palm_box = palm_box.astype(np.int32)
    # Score label sits just inside the top-left corner of the box.
    cv.putText(output, '{:.4f}'.format(score), (palm_box[0], palm_box[1]+12), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0))
    cv.rectangle(output, (palm_box[0], palm_box[1]), (palm_box[2], palm_box[3]), (0, 255, 0), 2)

    palm_landmarks = palm_landmarks.astype(np.int32)
    for p in palm_landmarks:
        cv.circle(output, p, 2, (0, 0, 255), 2)

    return output
if __name__ == '__main__':
    # Build the detector from the CLI-selected model, thresholds and device.
    model = MPPalmDet(modelPath=args.model,
                      nmsThreshold=args.nms_threshold,
                      scoreThreshold=args.score_threshold,
                      backendId=args.backend,
                      targetId=args.target)

    if args.input is not None:
        # Image mode: run a single inference and report/visualize the result.
        image = cv.imread(args.input)

        score, palm_box, palm_landmarks = model.infer(image)
        if score is None or palm_box is None or palm_landmarks is None:
            print('Hand not detected')
        else:
            print('score: {:.2f}'.format(score))
            print('palm box: {}'.format(palm_box))
            print('palm_landmarks: ')
            # Unpack enumerate: iterating it directly would print
            # (index, point) tuples instead of the landmarks.
            for idx, plm in enumerate(palm_landmarks):
                print('\t{}: {}'.format(idx, plm))

            image = visualize(image, score, palm_box, palm_landmarks)

            if args.save:
                print('Results saved to result.jpg\n')
                cv.imwrite('result.jpg', image)

            if args.vis:
                cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
                cv.imshow(args.input, image)
                cv.waitKey(0)
    else:
        # Camera mode: stream from the default device until a key is pressed.
        deviceId = 0
        cap = cv.VideoCapture(deviceId)

        tm = cv.TickMeter()
        while cv.waitKey(1) < 0:
            hasFrame, frame = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break

            # Time only the inference so the FPS overlay reflects the model.
            tm.start()
            score, palm_box, palm_landmarks = model.infer(frame)
            tm.stop()

            if score is not None and palm_box is not None and palm_landmarks is not None:
                frame = visualize(frame, score, palm_box, palm_landmarks, fps=tm.getFPS())

            cv.imshow('MPPalmDet Demo', frame)
            tm.reset()

        # Release the capture device and close windows on exit.
        cap.release()
        cv.destroyAllWindows()