JeVoisBase 1.23
JeVois Smart Embedded Machine Vision Toolkit Base Modules
Functions

str2bool(v)
visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None)
get_color_map_list(num_classes)
visualize(image, result, weight=0.6, fps=None)
visualize(image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1)
readImageFromDirectory(img_dir, w=128, h=256)
visualize(image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None)
visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None)
Variables

list backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
list targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
str help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
str help_msg_targets = "Chose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
type
str
help
default
int
float
False
str2bool
True
args = parser.parse_args()
model
image = cv.imread(args.input)
h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
_
results = model.infer(image)
int deviceId = 0
cap = cv.VideoCapture(deviceId)
tm = cv.TickMeter()
hasFrame
frame = visualize(frame, results, fps=tm.getFPS())
choices
recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target)
detector
img1 = cv.imread(args.input1)
img2 = cv.imread(args.input2)
face1 = detector.infer(img1)
face2 = detector.infer(img2)
result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])
_image = cv.resize(image, dsize=(192, 192))
_frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
dict models
_input = args.input
int device_id = 0
video = cv.VideoCapture(_input)
has_frame
first_frame
first_frame_copy = first_frame.copy()
roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)
isLocated
bbox
score
palm_box
palm_landmarks
net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target)
query_img_list
query_file_list
gallery_img_list
gallery_file_list
topk_indices = net.query(query_img_list, gallery_img_list, args.topk)
list topk_matches = []
results_vis = visualize(results, args.query_dir, args.gallery_dir)
res
points
fps = tm.getFPS()
float64
list texts = []
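Taken together, the variables above trace the main loop of the YuNet face-detection demo. The following is a minimal sketch of that flow; the YuNet import path, constructor arguments, and setInputSize() call are assumptions, not verbatim demo.py code:

import cv2 as cv
from yunet import YuNet  # hypothetical import path for the demo's YuNet wrapper

# Assumed constructor arguments; the demo builds these from argparse options.
model = YuNet(modelPath='face_detection_yunet.onnx',
              backendId=cv.dnn.DNN_BACKEND_OPENCV,
              targetId=cv.dnn.DNN_TARGET_CPU)

deviceId = 0
cap = cv.VideoCapture(deviceId)
w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
model.setInputSize([w, h])  # assumption: the wrapper wants the frame size up front

tm = cv.TickMeter()
while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        break
    tm.start()
    results = model.infer(frame)  # one detection per row
    tm.stop()
    # visualize() here is the overload documented below (box_color/text_color/fps).
    frame = visualize(frame, results, fps=tm.getFPS())
    cv.imshow('YuNet Demo', frame)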
Function Documentation

demo.get_color_map_list(num_classes)
Returns the color map for visualizing the segmentation mask, which supports an arbitrary number of classes.

Args:
    num_classes (int): Number of classes.

Returns:
    (list): The color map.
Definition at line 43 of file demo.py.
Referenced by visualize().
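The exact construction lives at line 43 of demo.py; a common PASCAL VOC-style color map that fits this signature and return type is sketched below (an illustration, not necessarily the code at line 43):

import numpy as np

def get_color_map_list(num_classes):
    # Flat [R0, G0, B0, R1, G1, B1, ...] list, one RGB triple per class.
    num_classes += 1
    color_map = num_classes * [0, 0, 0]
    for i in range(num_classes):
        j, lab = 0, i
        while lab:
            # Spread the bits of the class index across the three channels.
            color_map[i * 3 + 0] |= ((lab >> 0) & 1) << (7 - j)
            color_map[i * 3 + 1] |= ((lab >> 1) & 1) << (7 - j)
            color_map[i * 3 + 2] |= ((lab >> 2) & 1) << (7 - j)
            j += 1
            lab >>= 1
    return color_map[3:]  # drop the background entry

colors = np.array(get_color_map_list(19)).reshape(-1, 3)  # e.g. 19 classes -> (19, 3)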
demo.visualize(image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1)
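This overload belongs to the DaSiamRPN tracking demo (note roi, isLocated, bbox, and score in the variable list above). A hedged sketch of how it is driven; the DaSiamRPN wrapper import, constructor, and infer() return triple are assumptions:

import cv2 as cv
from dasiamrpn import DaSiamRPN  # hypothetical import path

tracker = DaSiamRPN(model_path='dasiamrpn.onnx')  # constructor args are assumptions

video = cv.VideoCapture(0)
has_frame, first_frame = video.read()
first_frame_copy = first_frame.copy()
roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)  # user draws the target box
tracker.init(first_frame, roi)  # assumed wrapper method

tm = cv.TickMeter()
while cv.waitKey(1) < 0:
    has_frame, frame = video.read()
    if not has_frame:
        break
    tm.start()
    isLocated, bbox, score = tracker.infer(frame)  # assumed return triple
    tm.stop()
    frame = visualize(frame, bbox, score, isLocated, fps=tm.getFPS())
    cv.imshow('DaSiamRPN Demo', frame)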
demo.visualize(image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None)
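This overload pairs with the palm-detection variables (palm_box, palm_landmarks) above. A sketch of what a function with this signature plausibly draws; the layouts of res and points are assumptions:

import cv2 as cv
import numpy as np

def visualize_palm(image, res, points,
                   points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None):
    out = image.copy()
    if fps is not None:
        cv.putText(out, 'FPS: {:.2f}'.format(fps), (0, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
    x1, y1, x2, y2 = res[:4].astype(np.int32)  # assumed [x1, y1, x2, y2, ...] box
    cv.rectangle(out, (x1, y1), (x2, y2), points_color, 2)
    for p in points:  # assumed one (x, y) landmark per row
        cv.circle(out, (int(p[0]), int(p[1])), 2, points_color, -1)
    return out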
demo.visualize(image, result, weight=0.6, fps=None)
Convert the prediction result to a color image and blend it with the input image.

Args:
    image (str): The input image.
    result (np.ndarray): The prediction result for the image.
    weight (float): The weight of the input image in the blend; the result weight is (1 - weight). Default: 0.6.
    fps (str): The FPS value to be drawn on the input image.

Returns:
    vis_result (np.ndarray): The visualized result.
Definition at line 69 of file demo.py.
References get_color_map_list().
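A minimal sketch of the blend this docstring describes, assuming image is already a loaded BGR array and result is a per-pixel class-index map; it reuses get_color_map_list() from this module (channel order is glossed over for brevity):

import cv2 as cv
import numpy as np

def blend_result(image, result, weight=0.6):
    color_map = get_color_map_list(256)                       # flat [R, G, B, ...] list
    lut = np.array(color_map, dtype=np.uint8).reshape(-1, 3)  # one color row per class
    color_mask = lut[result.astype(np.int32)]                 # H x W x 3 color image
    # weight * input + (1 - weight) * colorized prediction, as documented above.
    return cv.addWeighted(image, weight, color_mask, 1 - weight, 0)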
demo.visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None)
Definition at line 46 of file demo.py.
Referenced by visualize().
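A sketch of a function with this signature, assuming the YuNet detection layout implied elsewhere on this page ([x, y, w, h, ...] per row with a trailing confidence score); not the verbatim line-46 code:

import cv2 as cv
import numpy as np

def visualize_faces(image, results, box_color=(0, 255, 0),
                    text_color=(0, 0, 255), fps=None):
    out = image.copy()
    if fps is not None:
        cv.putText(out, 'FPS: {:.2f}'.format(fps), (0, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
    for det in results:
        x, y, w, h = det[:4].astype(np.int32)
        cv.rectangle(out, (x, y), (x + w, y + h), box_color, 2)
        cv.putText(out, '{:.4f}'.format(det[-1]), (x, y + 12),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)  # trailing score
    return out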
demo.visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None)
Definition at line 51 of file demo.py.
References visualize().
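isClosed and thickness map directly onto cv.polylines(), which suggests this overload draws polygonal (e.g. text-detection) boxes rather than axis-aligned rectangles. A sketch under that assumption; the layout of results is a guess:

import cv2 as cv
import numpy as np

def visualize_polygons(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255),
                       isClosed=True, thickness=2, fps=None):
    out = image.copy()
    if fps is not None:
        cv.putText(out, 'FPS: {:.2f}'.format(fps), (0, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
    pts = np.array(results, dtype=np.int32)  # assumed: K polygons of 4 (x, y) points
    cv.polylines(out, pts, isClosed, box_color, thickness)
    return out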
Variable Documentation

list demo.backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]

demo.detector

demo.int
Definition at line 37 of file demo.py.
Referenced by visualize(), and visualize().

dict demo.model

dict demo.models

demo.net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target)

demo.parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')

demo.recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target)

demo.roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)

list demo.targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]

demo.topk_indices = net.query(query_img_list, gallery_img_list, args.topk)
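For reference, the SFace-related variables above assemble into the following match flow; wrapper import paths, setInputSize(), and the disType encoding are assumptions rather than verbatim demo.py code:

import cv2 as cv
from yunet import YuNet   # hypothetical import paths for the demo wrappers
from sface import SFace

detector = YuNet(modelPath='face_detection_yunet.onnx')
recognizer = SFace(modelPath='face_recognition_sface.onnx',
                   disType=0,  # assumption: 0 = cosine distance, 1 = L2 norm
                   backendId=cv.dnn.DNN_BACKEND_OPENCV,
                   targetId=cv.dnn.DNN_TARGET_CPU)

img1 = cv.imread('person_a.jpg')
img2 = cv.imread('person_b.jpg')
detector.setInputSize([img1.shape[1], img1.shape[0]])  # assumed wrapper method
face1 = detector.infer(img1)
detector.setInputSize([img2.shape[1], img2.shape[0]])
face2 = detector.infer(img2)

# face*[0] is the top detection; [:-1] drops its trailing confidence score,
# leaving the box and landmarks that match() uses for alignment.
result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])
print('Same identity' if result else 'Different identities')  # assumed 1/0 return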