JeVoisBase  1.20
JeVois Smart Embedded Machine Vision Toolkit Base Modules
demo Namespace Reference

Functions

def str2bool (v)
 
def visualize (image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None)
 
def get_color_map_list (num_classes)
 
def visualize (image, result, weight=0.6, fps=None)
 
def visualize (image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1)
 
def readImageFromDirectory (img_dir, w=128, h=256)
 
def visualize (image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None)
 
def visualize (image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None)
 

Variables

list backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]
 
list targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]
 
string help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"
 
string help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"
 
 parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
 
 type
 
 str
 
 help
 
 default
 
 int
 
 float
 
 False
 
 str2bool
 
 True
 
 args = parser.parse_args()
 
 model
 
 image = cv.imread(args.input)
 
 h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
 
 w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
 
 results = model.infer(image)
 
int deviceId = 0
 
 cap = cv.VideoCapture(deviceId)
 
 tm = cv.TickMeter()
 
 hasFrame
 
 frame = visualize(frame, results, fps=tm.getFPS())
 
 choices
 
 recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target)
 
 detector
 
 img1 = cv.imread(args.input1)
 
 img2 = cv.imread(args.input2)
 
 face1 = detector.infer(img1)
 
 face2 = detector.infer(img2)
 
 result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])
 
dictionary models
 
int device_id = 0
 
 video = cv.VideoCapture(_input)
 
 has_frame
 
 first_frame
 
 first_frame_copy = first_frame.copy()
 
 roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)
 
 isLocated
 
 bbox
 
 score
 
 palm_box
 
 palm_landmarks
 
 net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target)
 
 query_img_list
 
 query_file_list
 
 gallery_img_list
 
 gallery_file_list
 
 topk_indices = net.query(query_img_list, gallery_img_list, args.topk)
 
list topk_matches = []
 
 results_vis = visualize(results, args.query_dir, args.gallery_dir)
 
 res
 
 points
 
 fps = tm.getFPS()
 
 float64
 
list texts = []
 

Function Documentation

◆ get_color_map_list()

def demo.get_color_map_list (   num_classes)
Returns the color map used to visualize the segmentation mask;
it supports an arbitrary number of classes.

Args:
    num_classes (int): Number of classes.

Returns:
    (list). The color map.

Definition at line 43 of file demo.py.

Referenced by visualize().
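
A minimal sketch of how such a color map is commonly generated, using the standard PASCAL VOC bit-interleaving scheme (the actual implementation in demo.py may differ):

    def get_color_map_list(num_classes):
        # Expand each class id bit-by-bit into the R/G/B channels so that
        # nearby ids still map to visually distinct colors.
        color_map = num_classes * [0, 0, 0]
        for i in range(num_classes):
            j, lab = 0, i
            while lab:
                color_map[i * 3 + 0] |= ((lab >> 0) & 1) << (7 - j)
                color_map[i * 3 + 1] |= ((lab >> 1) & 1) << (7 - j)
                color_map[i * 3 + 2] |= ((lab >> 2) & 1) << (7 - j)
                j += 1
                lab >>= 3
        return color_map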

◆ readImageFromDirectory()

def demo.readImageFromDirectory (   img_dir,
  w = 128,
  h = 256 
)

Definition at line 47 of file demo.py.

References visualize().
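
The entry above records only the signature. A plausible sketch consistent with it, assuming the function returns parallel lists of resized images and file names (as the query_img_list/query_file_list variables below suggest):

    import os
    import cv2 as cv

    def readImageFromDirectory(img_dir, w=128, h=256):
        # Load every readable image in img_dir, resized to w x h.
        img_list, file_list = [], []
        for f in sorted(os.listdir(img_dir)):
            img = cv.imread(os.path.join(img_dir, f))
            if img is None:
                continue  # skip files OpenCV cannot decode
            img_list.append(cv.resize(img, (w, h)))
            file_list.append(f)
        return img_list, file_list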

◆ str2bool()

def demo.str2bool (   v)

Definition at line 14 of file demo.py.

References str2bool.
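
A common implementation pattern for this argparse helper; the exact set of accepted spellings is an assumption:

    import argparse

    def str2bool(v):
        # Map common textual spellings to booleans for argparse 'type='.
        if v.lower() in ('yes', 'true', 't', 'y', 'on', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', 'off', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')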

◆ visualize() [1/5]

def demo.visualize (   image,
  bbox,
  score,
  isLocated,
  fps = None,
  box_color = (0, 255, 0),
  text_color = (0, 255, 0),
  fontScale = 1,
  fontSize = 1 
)

Definition at line 32 of file demo.py.

References int, and visualize().

◆ visualize() [2/5]

def demo.visualize (   image,
  res,
  points,
  points_color = (0, 255, 0),
  text_color = (0, 255, 0),
  fps = None 
)

Definition at line 33 of file demo.py.

References int, and visualize().

◆ visualize() [3/5]

def demo.visualize (   image,
  result,
  weight = 0.6,
  fps = None 
)
Convert the prediction result to a color image and blend it with the input image.

Args:
    image (np.ndarray): The input image.
    result (np.ndarray): The prediction result for the image.
    weight (float): The blending weight of the input image; the result is weighted by (1 - weight). Default: 0.6.
    fps (str): The FPS value to draw onto the input image.

Returns:
    vis_result (np.ndarray): The visualized result.

Definition at line 69 of file demo.py.

References get_color_map_list(), int, and visualize().
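
A sketch of the documented behavior under a hypothetical helper name (blend_segmentation), built on get_color_map_list() from above and cv.addWeighted; it assumes image is already a BGR array at the same resolution as result:

    import numpy as np
    import cv2 as cv

    def blend_segmentation(image, result, weight=0.6, num_classes=256):
        # Look up a pseudo-color for each class id, then alpha-blend:
        # out = weight * image + (1 - weight) * color_mask
        lut = np.array(get_color_map_list(num_classes),
                       dtype=np.uint8).reshape(-1, 3)
        color_mask = lut[result.astype(np.int32)]   # H x W x 3 pseudo-color
        return cv.addWeighted(image, weight, color_mask, 1 - weight, 0)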

◆ visualize() [4/5]

def demo.visualize (   image,
  results,
  box_color = (0, 255, 0),
  text_color = (0, 0, 255),
  fps = None 
)

Definition at line 46 of file demo.py.

Referenced by readImageFromDirectory(), and visualize().
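
No docstring is recorded for this overload. A sketch of the box-drawing it implies, under a hypothetical name and assuming each row of results is laid out [x, y, w, h, ..., score] as YuNet-style detectors produce:

    import cv2 as cv

    def draw_detections(image, results, box_color=(0, 255, 0),
                        text_color=(0, 0, 255), fps=None):
        # Draw one rectangle and one confidence label per detection row.
        output = image.copy()
        if fps is not None:
            cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
        for det in results:
            x, y, w, h = (int(v) for v in det[:4])
            cv.rectangle(output, (x, y), (x + w, y + h), box_color, 2)
            cv.putText(output, '{:.4f}'.format(det[-1]), (x, y - 5),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
        return output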

◆ visualize() [5/5]

def demo.visualize (   image,
  results,
  box_color = (0, 255, 0),
  text_color = (0, 0, 255),
  isClosed = True,
  thickness = 2,
  fps = None 
)

Definition at line 51 of file demo.py.

References visualize().

Variable Documentation

◆ args

demo.args = parser.parse_args()

Definition at line 44 of file demo.py.

◆ backends

list demo.backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA]

Definition at line 22 of file demo.py.

◆ bbox

demo.bbox

Definition at line 90 of file demo.py.

◆ cap

demo.cap = cv.VideoCapture(deviceId)

Definition at line 113 of file demo.py.

◆ choices

demo.choices

Definition at line 45 of file demo.py.

◆ default

demo.default

Definition at line 36 of file demo.py.

◆ detector

demo.detector
Initial value:
= YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2022mar.onnx',
        inputSize=[320, 320],
        confThreshold=0.9,
        nmsThreshold=0.3,
        topK=5000,
        backendId=args.backend,
        targetId=args.target)

Definition at line 54 of file demo.py.

◆ device_id

int demo.device_id = 0

Definition at line 63 of file demo.py.

◆ deviceId

int demo.deviceId = 0

Definition at line 112 of file demo.py.

◆ face1

demo.face1 = detector.infer(img1)

Definition at line 67 of file demo.py.

◆ face2

demo.face2 = detector.infer(img2)

Definition at line 70 of file demo.py.

◆ False

demo.False

Definition at line 42 of file demo.py.

◆ first_frame

demo.first_frame

Definition at line 68 of file demo.py.

◆ first_frame_copy

demo.first_frame_copy = first_frame.copy()

Definition at line 72 of file demo.py.

◆ float

◆ float64

demo.float64

Definition at line 46 of file demo.py.

◆ fps

demo.fps = tm.getFPS()

Definition at line 102 of file demo.py.

◆ frame

demo.frame = visualize(frame, results, fps=tm.getFPS())

◆ gallery_file_list

demo.gallery_file_list

Definition at line 86 of file demo.py.

◆ gallery_img_list

demo.gallery_img_list

Definition at line 86 of file demo.py.

◆ h

demo.h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

Definition at line 85 of file demo.py.

◆ has_frame

demo.has_frame

Definition at line 68 of file demo.py.

◆ hasFrame

demo.hasFrame

Definition at line 120 of file demo.py.

◆ help

demo.help

Definition at line 35 of file demo.py.

◆ help_msg_backends

string demo.help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA"

Definition at line 24 of file demo.py.

◆ help_msg_targets

string demo.help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16"

Definition at line 25 of file demo.py.

◆ image

demo.image = cv.imread(args.input)

◆ img1

demo.img1 = cv.imread(args.input1)

Definition at line 62 of file demo.py.

Referenced by env_img_swap().

◆ img2

demo.img2 = cv.imread(args.input2)

Definition at line 63 of file demo.py.

Referenced by env_img_swap().

◆ int

◆ isLocated

demo.isLocated

Definition at line 90 of file demo.py.

◆ model

demo.model
Initial value:
= YuNet(modelPath=args.model,
        inputSize=[320, 320],
        confThreshold=args.conf_threshold,
        nmsThreshold=args.nms_threshold,
        topK=args.top_k,
        backendId=args.backend,
        targetId=args.target)

Definition at line 74 of file demo.py.

◆ models

dictionary demo.models
Initial value:
= {
    'v1': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
    'v2': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
    'v1-q': MobileNetV1(modelPath='./image_classification_mobilenetv1_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target),
    'v2-q': MobileNetV2(modelPath='./image_classification_mobilenetv2_2022apr-act_int8-wt_int8-quantized.onnx', labelPath=args.label, backendId=args.backend, targetId=args.target)
}

Definition at line 39 of file demo.py.
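
Usage implied by the surrounding variables; the key string would come from a command-line argument whose exact flag name is not recorded here:

    import cv2 as cv

    # Pick one of the four MobileNet variants by key, then run inference.
    model = models['v1']            # or 'v2', 'v1-q', 'v2-q'
    image = cv.imread(args.input)
    results = model.infer(image)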

◆ net

demo.net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target)

◆ palm_box

demo.palm_box

Definition at line 72 of file demo.py.

◆ palm_landmarks

demo.palm_landmarks

Definition at line 72 of file demo.py.

◆ parser

demo.parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')

Definition at line 34 of file demo.py.
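
A sketch of how the documented parser ties together str2bool, backends, targets, and the two help strings; the argument flag names here are assumptions, only the description string is taken from this page:

    import argparse

    parser = argparse.ArgumentParser(
        description='YuNet: A Fast and Accurate CNN-based Face Detector '
                    '(https://github.com/ShiqiYu/libfacedetection).')
    parser.add_argument('--input', '-i', type=str,
                        help='Path to the input image; omit to use the camera.')
    parser.add_argument('--backend', '-b', type=int, default=backends[0],
                        help=help_msg_backends.format(*backends))
    parser.add_argument('--target', '-t', type=int, default=targets[0],
                        help=help_msg_targets.format(*targets))
    parser.add_argument('--save', '-s', type=str2bool, default=False,
                        help='Whether to save the visualized result.')
    args = parser.parse_args()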

◆ points

◆ query_file_list

demo.query_file_list

Definition at line 85 of file demo.py.

◆ query_img_list

demo.query_img_list

Definition at line 85 of file demo.py.

◆ recognizer

demo.recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target)

Definition at line 52 of file demo.py.
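
The detect-then-match flow that the img1/face1/result entries on this page trace out, gathered into one place (detector and recognizer are the instances documented here):

    import cv2 as cv

    # Detect a face in each image, then compare the two detections;
    # match() receives each face row without its trailing score column.
    img1 = cv.imread(args.input1)
    img2 = cv.imread(args.input2)
    face1 = detector.infer(img1)
    face2 = detector.infer(img2)
    result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])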

◆ res

◆ result

◆ results

demo.results = model.infer(image)

Definition at line 89 of file demo.py.

Referenced by TensorFlow.predict(), Darknet.predict(), and QRcode.process().

◆ results_vis

demo.results_vis = visualize(results, args.query_dir, args.gallery_dir)

Definition at line 103 of file demo.py.

◆ roi

demo.roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy)

Definition at line 75 of file demo.py.

Referenced by SaliencySURF.process().

◆ score

◆ str

◆ str2bool

demo.str2bool

Definition at line 43 of file demo.py.

Referenced by str2bool().

◆ targets

list demo.targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16]

Definition at line 23 of file demo.py.

◆ texts

list demo.texts = []

Definition at line 79 of file demo.py.

◆ tm

demo.tm = cv.TickMeter()

Definition at line 118 of file demo.py.
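
The cap, tm, hasFrame, frame, and results entries on this page come from a per-frame capture loop; a sketch of its shape, where model and visualize() are the names documented above rather than new definitions:

    import cv2 as cv

    deviceId = 0
    cap = cv.VideoCapture(deviceId)
    tm = cv.TickMeter()
    while cv.waitKey(1) < 0:
        hasFrame, frame = cap.read()
        if not hasFrame:
            break
        tm.start()
        results = model.infer(frame)    # model: see the entry above
        tm.stop()
        frame = visualize(frame, results, fps=tm.getFPS())
        cv.imshow('Demo', frame)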

◆ topk_indices

demo.topk_indices = net.query(query_img_list, gallery_img_list, args.topk)

Definition at line 89 of file demo.py.

◆ topk_matches

list demo.topk_matches = []

Definition at line 94 of file demo.py.
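
How the query/gallery variables fit together; readImageFromDirectory() and net are documented above, and the assumption that topk_matches collects gallery file names is an inference from its name:

    # Load query and gallery crops, rank the gallery for each query,
    # then map the returned indices back to gallery file names.
    query_img_list, query_file_list = readImageFromDirectory(args.query_dir)
    gallery_img_list, gallery_file_list = readImageFromDirectory(args.gallery_dir)
    topk_indices = net.query(query_img_list, gallery_img_list, args.topk)
    topk_matches = [[gallery_file_list[i] for i in row]
                    for row in topk_indices]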

◆ True

demo.True

Definition at line 43 of file demo.py.

◆ type

demo.type

Definition at line 35 of file demo.py.

◆ video

demo.video = cv.VideoCapture(_input)

Definition at line 65 of file demo.py.

◆ w

demo.w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))

Definition at line 85 of file demo.py.

Referenced by QRcode.drawDetections(), FirstVision.drawDetections(), drawMap(), env_c_lowpass_5_x_dec_x_fewbits_optim(), env_c_lowpass_5_y_dec_y_fewbits_optim(), env_c_lowpass_9_x_fewbits_optim(), env_c_lowpass_9_y_fewbits_optim(), env_lowpass_5_x_dec_x(), env_lowpass_5_y_dec_y(), TensorFlow.getInDims(), Darknet.getInDims(), Yolo.getInDims(), RoadFinder.getPixels(), RoadFinder.getPixelsQuick(), DarknetSaliency.getSalROI(), TensorFlowSaliency.getSalROI(), FirstVision.learnHSV(), FirstVision.loadCameraCalibration(), ARtoolkit.manualinit(), Yolo.predict(), Darknet.predict(), SuperPixelSeg.process(), OpticalFlow.process(), DiceCounter.process(), DenseSift.process(), DemoBackgroundSubtract.process(), SaliencyGist.process(), SalientRegions.process(), DemoSaliency.process(), DemoQRcode.process(), DemoCPUGPU.process(), DemoIMU.process(), BurnTest.process(), RoadNavigation.process(), DemoARtoolkit.process(), MarkersCombo.process(), DNN.process(), ColorFiltering.process(), DemoNeon.process(), DarknetYOLO.process(), DarknetSingle.process(), SaliencySURF.process(), DemoDMP.process(), DemoArUco.process(), ObjectTracker.process(), SurpriseRecorder.process(), TensorFlowEasy.process(), TensorFlowSingle.process(), DarknetSaliency.process(), TensorFlowSaliency.process(), ObjectDetect.process(), SaveVideo.process(), ArUcoBlob.process(), DetectionDNN.process(), FirstVision.process(), Darknet.resizeInDims(), Yolo.resizeInDims(), FirstVision.sendAllSerial(), ArUcoBlob.sendBlobs(), and ArUco.sendSerial().