JeVoisBase  1.22
JeVois Smart Embedded Machine Vision Toolkit Base Modules
demo Namespace Reference

Functions

 str2bool (v)
 
 visualize (image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None)
 
 get_color_map_list (num_classes)
 
 visualize (image, result, weight=0.6, fps=None)
 
 visualize (image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1)
 
 readImageFromDirectory (img_dir, w=128, h=256)
 
 visualize (image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None)
 
 visualize (image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None)
 

Variables

list backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
 
list targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
 
str help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
 
str help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
 
 parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
 
 type
 
 str
 
 help
 
 default
 
 int
 
 float
 
 False
 
 str2bool
 
 True
 
 args = parser.parse_args()
 
 model
 
 image = cv.imread(args.input)
 
 h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
 
 w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
 
 _
 
 results = model.infer(image)
 
int deviceId = 0
 
 cap = cv.VideoCapture(deviceId)
 
 tm = cv.TickMeter()
 
 hasFrame
 
 frame = visualize(frame, results, fps=tm.getFPS())
 
 choices
 
 recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target)
 
 detector
 
 img1 = cv.imread(args.input1)
 
 img2 = cv.imread(args.input2)
 
 face1 = detector.infer(img1)
 
 face2 = detector.infer(img2)
 
 result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])
 
 _image = cv.resize(image, dsize=(192, 192))
 
 _frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
 
dict models
 
 _input = args.input
 
int device_id = 0
 
 video = cv.VideoCapture(_input)
 
 has_frame
 
 first_frame
 
 first_frame_copy = first_frame.copy()
 
 roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)
 
 isLocated
 
 bbox
 
 score
 
 palm_box
 
 palm_landmarks
 
 net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target)
 
 query_img_list
 
 query_file_list
 
 gallery_img_list
 
 gallery_file_list
 
 topk_indices = net.query(query_img_list, gallery_img_list, args.topk)
 
list topk_matches = []
 
 results_vis = visualize(results, args.query_dir, args.gallery_dir)
 
 res
 
 points
 
 fps = tm.getFPS()
 
 float64
 
list texts = []
 

Function Documentation

◆ get_color_map_list()

demo.get_color_map_list (   num_classes)
Returns the color map used to visualize the segmentation mask;
it supports an arbitrary number of classes.

Args:
    num_classes (int): Number of classes.

Returns:
    (list): The color map.

Definition at line 43 of file demo.py.

Referenced by visualize().
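
A short usage sketch follows; the flat [r, g, b, r, g, b, ...] layout of the returned list is an assumption based on common segmentation color maps, not something this page confirms.

    import numpy as np

    # Hypothetical usage: turn a class-id mask into a color image using the
    # list returned by demo.get_color_map_list() (flat RGB layout assumed).
    color_map = np.array(get_color_map_list(19)).reshape(-1, 3).astype(np.uint8)
    mask = np.zeros((256, 256), dtype=np.int64)   # placeholder class-id mask
    colored = color_map[mask]                     # (256, 256, 3) uint8 image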

◆ readImageFromDirectory()

demo.readImageFromDirectory (   img_dir,
  w = 128,
  h = 256 
)

Definition at line 47 of file demo.py.
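
No docstring is given. As a sketch, this is what a directory reader like this plausibly does for the person re-identification demo: load every image under img_dir and resize it to w x h. All details are assumptions, not the verbatim demo.py code.

    import os
    import cv2 as cv

    def read_image_from_directory_sketch(img_dir, w=128, h=256):
        # Collect (resized image, filename) pairs from a directory; files
        # that cv.imread() cannot decode are skipped.
        img_list, file_list = [], []
        for name in sorted(os.listdir(img_dir)):
            img = cv.imread(os.path.join(img_dir, name))
            if img is None:
                continue
            img_list.append(cv.resize(img, (w, h)))
            file_list.append(name)
        return img_list, file_list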

◆ str2bool()

demo.str2bool (   v)

Definition at line 14 of file demo.py.
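
str2bool is used as an argparse type converter below (see the type and str2bool variables). A conventional implementation of that pattern looks like the following sketch; the exact strings demo.py accepts are not documented here.

    import argparse

    def str2bool_sketch(v):
        # Map common textual spellings to a bool; reject anything else so
        # argparse reports a clean error instead of treating the string as truthy.
        if v.lower() in ('true', 'yes', 'on', 't', '1'):
            return True
        if v.lower() in ('false', 'no', 'off', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')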

◆ visualize() [1/5]

demo.visualize (   image,
  bbox,
  score,
  isLocated,
  fps = None,
  box_color = (0, 255, 0),
  text_color = (0, 255, 0),
  fontScale = 1,
  fontSize = 1 
)

Definition at line 32 of file demo.py.

References int.
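
The bbox/score/isLocated parameters match the tracking variables documented below, so a plausible per-frame call pattern is the following (the model.infer() unpacking is an assumption; model, tm, and cv come from the demo's scope):

    # Visualize the tracker output for the current frame.
    isLocated, bbox, score = model.infer(frame)   # assumed tracker API
    frame = visualize(frame, bbox, score, isLocated, fps=tm.getFPS())
    cv.imshow('DaSiamRPN Demo', frame)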

◆ visualize() [2/5]

demo.visualize (   image,
  res,
  points,
  points_color = (0, 255, 0),
  text_color = (0, 255, 0),
  fps = None 
)

Definition at line 33 of file demo.py.

References int.

◆ visualize() [3/5]

demo.visualize (   image,
  result,
  weight = 0.6,
  fps = None 
)
Convert the prediction result to a color image and blend it with the input image.

Args:
    image (np.ndarray): The input image.
    result (np.ndarray): The prediction result for the image.
    weight (float): The blending weight of the input image; the colorized result is weighted by (1 - weight). Default: 0.6.
    fps (float): The FPS value to be drawn on the input image.

Returns:
    vis_result (np.ndarray): The visualized result.

Definition at line 69 of file demo.py.

References get_color_map_list().
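
The core of this overload is an alpha blend of the input image with a colorized mask. A minimal sketch of that step, assuming the flat color-map layout discussed under get_color_map_list() (the real demo.py may differ in details):

    import numpy as np
    import cv2 as cv

    num_classes = 2                                # assumed for illustration
    weight = 0.6                                   # documented default
    color_map = np.array(get_color_map_list(num_classes)).reshape(-1, 3).astype(np.uint8)
    pseudo_img = color_map[result]                 # class ids -> colors
    vis_result = cv.addWeighted(image, weight, pseudo_img, 1 - weight, 0)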

◆ visualize() [4/5]

demo.visualize (   image,
  results,
  box_color = (0, 255, 0),
  text_color = (0, 0, 255),
  fps = None 
)

Definition at line 46 of file demo.py.

Referenced by visualize().

◆ visualize() [5/5]

demo.visualize (   image,
  results,
  box_color = (0, 255, 0),
  text_color = (0, 0, 255),
  isClosed = True,
  thickness = 2,
  fps = None 
)

Definition at line 51 of file demo.py.

References visualize().
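
The isClosed and thickness parameters suggest this overload draws polygons (e.g. quadrilateral text-detection boxes) with cv.polylines(). A hedged sketch of that draw step, with the layout of results assumed:

    import numpy as np
    import cv2 as cv

    for box in results:                            # one polygon per detection
        pts = np.asarray(box, dtype=np.int32).reshape(-1, 1, 2)
        cv.polylines(image, [pts], isClosed=True, color=(0, 255, 0), thickness=2)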

Variable Documentation

◆ _

demo._
protected

Definition at line 85 of file demo.py.

◆ _frame

demo._frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
protected

Definition at line 140 of file demo.py.

◆ _image

demo._image = cv.resize(image, dsize=(192, 192))
protected

Definition at line 108 of file demo.py.

◆ _input

demo._input = args.input
protected

Definition at line 61 of file demo.py.

◆ args

demo.args = parser.parse_args()

Definition at line 44 of file demo.py.

◆ backends

list demo.backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]

Definition at line 22 of file demo.py.

◆ bbox

demo.bbox

Definition at line 90 of file demo.py.

◆ cap

demo.cap = cv.VideoCapture(deviceId)

Definition at line 113 of file demo.py.

◆ choices

demo.choices

Definition at line 45 of file demo.py.

◆ default

demo.default

Definition at line 36 of file demo.py.

◆ detector

demo.detector
Initial value:
= YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2022mar.onnx',
        inputSize=[320, 320],
        confThreshold=0.9,
        nmsThreshold=0.3,
        topK=5000,
        backendId=args.backend,
        targetId=args.target)

Definition at line 54 of file demo.py.

◆ device_id

int demo.device_id = 0

Definition at line 63 of file demo.py.

◆ deviceId

int demo.deviceId = 0

Definition at line 112 of file demo.py.

◆ face1

demo.face1 = detector.infer(img1)

Definition at line 67 of file demo.py.

◆ face2

demo.face2 = detector.infer(img2)

Definition at line 70 of file demo.py.

◆ False

demo.False

Definition at line 42 of file demo.py.

◆ first_frame

demo.first_frame

Definition at line 68 of file demo.py.

◆ first_frame_copy

demo.first_frame_copy = first_frame.copy()

Definition at line 72 of file demo.py.

◆ float

demo.float

Definition at line 39 of file demo.py.

◆ float64

demo.float64

Definition at line 46 of file demo.py.

◆ fps

demo.fps = tm.getFPS()

Definition at line 102 of file demo.py.

◆ frame

demo.frame = visualize(frame, results, fps=tm.getFPS())

Definition at line 120 of file demo.py.

◆ gallery_file_list

demo.gallery_file_list

Definition at line 86 of file demo.py.

◆ gallery_img_list

demo.gallery_img_list

Definition at line 86 of file demo.py.

◆ h

demo.h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

Definition at line 85 of file demo.py.

◆ has_frame

demo.has_frame

Definition at line 68 of file demo.py.

◆ hasFrame

demo.hasFrame

Definition at line 120 of file demo.py.

◆ help

demo.help

Definition at line 35 of file demo.py.

◆ help_msg_backends

str demo.help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"

Definition at line 24 of file demo.py.

◆ help_msg_targets

str demo.help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"

Definition at line 25 of file demo.py.

◆ image

demo.image = cv.imread(args.input)

Definition at line 84 of file demo.py.

◆ img1

demo.img1 = cv.imread(args.input1)

Definition at line 62 of file demo.py.

◆ img2

demo.img2 = cv.imread(args.input2)

Definition at line 63 of file demo.py.

◆ int

demo.int

Definition at line 37 of file demo.py.

Referenced by visualize(), and visualize().

◆ isLocated

demo.isLocated

Definition at line 90 of file demo.py.

◆ model

demo.model
Initial value:
= YuNet(modelPath=args.model,
        inputSize=[320, 320],
        confThreshold=args.conf_threshold,
        nmsThreshold=args.nms_threshold,
        topK=args.top_k,
        backendId=args.backend,
        targetId=args.target)

Definition at line 74 of file demo.py.

◆ models

dict demo.models
Initial value:
= {
    'v1': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
    'v2': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
    'v1-q': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
    'v2-q': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target)
}

Definition at line 39 of file demo.py.
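
The keys suggest the classification demo selects a MobileNet variant from a command-line flag; a hypothetical selection step (the flag name is an assumption) would be:

    # Pick one of the four documented variants and run it on the input image.
    classifier = models[args.model]   # assumed to be 'v1', 'v2', 'v1-q' or 'v2-q'
    results = classifier.infer(image)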

◆ net

demo.net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target)

Definition at line 82 of file demo.py.

◆ palm_box

demo.palm_box

Definition at line 72 of file demo.py.

◆ palm_landmarks

demo.palm_landmarks

Definition at line 72 of file demo.py.

◆ parser

demo.parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')

Definition at line 34 of file demo.py.
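
The backends/targets lists and their help strings above are most likely wired into this parser as follows (a sketch consistent with those variables, not the verbatim demo.py):

    parser.add_argument('--backend', type=int, default=backends[0],
                        help=help_msg_backends.format(*backends))
    parser.add_argument('--target', type=int, default=targets[0],
                        help=help_msg_targets.format(*targets))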

◆ points

demo.points

Definition at line 68 of file demo.py.

◆ query_file_list

demo.query_file_list

Definition at line 85 of file demo.py.

◆ query_img_list

demo.query_img_list

Definition at line 85 of file demo.py.

◆ recognizer

demo.recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target)

Definition at line 52 of file demo.py.

◆ res

demo.res

Definition at line 68 of file demo.py.

◆ result

demo.result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])

Definition at line 74 of file demo.py.
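
Together with detector, img1/img2, and face1/face2, this variable sketches a two-image face matching flow. The assumed call order, consolidated from the assignments documented on this page:

    # Detect one face per image, then compare them; match() takes each YuNet
    # detection row without its trailing confidence score.
    face1 = detector.infer(img1)
    face2 = detector.infer(img2)
    result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])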

◆ results

demo.results = model.infer(image)

Definition at line 89 of file demo.py.

◆ results_vis

demo.results_vis = visualize(results, args.query_dir, args.gallery_dir)

Definition at line 103 of file demo.py.

◆ roi

demo.roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)

Definition at line 75 of file demo.py.

◆ score

demo.score

Definition at line 90 of file demo.py.

◆ str

demo.str

Definition at line 35 of file demo.py.

◆ str2bool

demo.str2bool

Definition at line 43 of file demo.py.

◆ targets

list demo.targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]

Definition at line 23 of file demo.py.

◆ texts

list demo.texts = []

Definition at line 79 of file demo.py.

◆ tm

demo.tm = cv.TickMeter()

Definition at line 118 of file demo.py.

◆ topk_indices

demo.topk_indices = net.query(query_img_list, gallery_img_list, args.topk)

Definition at line 89 of file demo.py.

◆ topk_matches

list demo.topk_matches = []

Definition at line 94 of file demo.py.

◆ True

demo.True

Definition at line 43 of file demo.py.

◆ type

demo.type

Definition at line 35 of file demo.py.

◆ video

demo.video = cv.VideoCapture(_input)

Definition at line 65 of file demo.py.

◆ w

demo.w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))

Definition at line 85 of file demo.py.