JeVoisBase
1.20
JeVois Smart Embedded Machine Vision Toolkit Base Modules
|
|
Functions | |
def | str2bool (v) |
def | visualize (image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None) |
def | get_color_map_list (num_classes) |
def | visualize (image, result, weight=0.6, fps=None) |
def | visualize (image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1) |
def | readImageFromDirectory (img_dir, w=128, h=256) |
def | visualize (image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None) |
def | visualize (image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None) |
Variables | |
list | backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA] |
list | targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16] |
string | help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA" |
string | help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16" |
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).') | |
type | |
str | |
help | |
default | |
int | |
float | |
False | |
str2bool | |
True | |
args = parser.parse_args() | |
model | |
image = cv.imread(args.input) | |
h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) | |
w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) | |
results = model.infer(image) | |
int | deviceId = 0 |
cap = cv.VideoCapture(deviceId) | |
tm = cv.TickMeter() | |
hasFrame | |
frame = visualize(frame, results, fps=tm.getFPS()) | |
choices | |
recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target) | |
detector | |
img1 = cv.imread(args.input1) | |
img2 = cv.imread(args.input2) | |
face1 = detector.infer(img1) | |
face2 = detector.infer(img2) | |
result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1]) | |
dictionary | models |
int | device_id = 0 |
video = cv.VideoCapture(_input) | |
has_frame | |
first_frame | |
first_frame_copy = first_frame.copy() | |
roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy) | |
isLocated | |
bbox | |
score | |
palm_box | |
palm_landmarks | |
net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target) | |
query_img_list | |
query_file_list | |
gallery_img_list | |
gallery_file_list | |
topk_indices = net.query(query_img_list, gallery_img_list, args.topk) | |
list | topk_matches = [] |
def | results_vis = visualize(results, args.query_dir, args.gallery_dir) |
res | |
points | |
fps = tm.getFPS() | |
float64 | |
list | texts = [] |
def demo.get_color_map_list(num_classes)
Returns the color map for visualizing the segmentation mask, which can support arbitrary number of classes. Args: num_classes (int): Number of classes. Returns: (list). The color map.
Definition at line 43 of file demo.py.
Referenced by visualize().
def demo.readImageFromDirectory(img_dir, w=128, h=256)
Definition at line 47 of file demo.py.
References visualize().
def demo.visualize(image, bbox, score, isLocated, fps=None, box_color=(0, 255, 0), text_color=(0, 255, 0), fontScale=1, fontSize=1)
Definition at line 32 of file demo.py.
References int, and visualize().
def demo.visualize(image, res, points, points_color=(0, 255, 0), text_color=(0, 255, 0), fps=None)
Definition at line 33 of file demo.py.
References int, and visualize().
def demo.visualize(image, result, weight=0.6, fps=None)
Convert predict result to color image, and save added image. Args: image (str): The input image. result (np.ndarray): The predict result of image. weight (float): The image weight of visual image, and the result weight is (1 - weight). Default: 0.6 fps (str): The FPS to be drawn on the input image. Returns: vis_result (np.ndarray): The visualized result.
Definition at line 69 of file demo.py.
References get_color_map_list(), int, and visualize().
def demo.visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None)
Definition at line 46 of file demo.py.
Referenced by readImageFromDirectory(), and visualize().
def demo.visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None)
Definition at line 51 of file demo.py.
References visualize().
list demo.backends = [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_CUDA] |
demo.detector |
demo.float |
Definition at line 39 of file demo.py.
Referenced by RoadFinder.computeRoadCenterPoint(), PyCoralDetect.PyCoralDetect.get_objects(), PyDetectionDNN.PyDetectionDNN.postprocess(), Yolo.predict(), Darknet.predict(), PyPostDAMOyolo.PyPostDAMOyolo.process(), DarknetSaliency.process(), TensorFlowSaliency.process(), PyLicensePlate.PyLicensePlate.processGUI(), and PySceneText.PySceneText.processGUI().
Definition at line 120 of file demo.py.
Referenced by DetectionDNN.postprocess(), BufferedVideoReader.run(), SurpriseRecorder.run(), and SaveVideo.run().
string demo.help_msg_backends = "Choose one of the computation backends: {:d}: OpenCV implementation (default); {:d}: CUDA" |
string demo.help_msg_targets = "Choose one of the target computation devices: {:d}: CPU (default); {:d}: CUDA; {:d}: CUDA fp16" |
def demo.image = cv.imread(args.input) |
Definition at line 84 of file demo.py.
Referenced by ARtoolkit.detectMarkers(), ArUco.detectMarkers(), Yolo.predict(), Darknet.predict(), QRcode.process(), and DiceCounter.process().
demo.img1 = cv.imread(args.input1) |
Definition at line 62 of file demo.py.
Referenced by env_img_swap().
demo.img2 = cv.imread(args.input2) |
Definition at line 63 of file demo.py.
Referenced by env_img_swap().
demo.int |
Definition at line 37 of file demo.py.
Referenced by transform.CenterCrop.__call__(), utils.metrics.base_metric.BaseMetric.__init__(), utils.dataloaders.classification.ClassificationImageLoader.__init__(), PyPoseDetector.calc_bounding_rect(), PyFaceMesh.PyFaceMesh.calc_bounding_rect(), PyHandDetector.PyHandDetector.calc_bounding_rect(), PyHandDetector.PyHandDetector.calc_palm_moment(), RoadFinder.computeVanishingLines(), FirstVision.detect(), PyFaceMesh.PyFaceMesh.draw_landmarks(), PyPoseDetector.draw_landmarks(), PyHandDetector.PyHandDetector.draw_landmarks(), ArUco.drawDetections(), PythonObject6D.PythonObject6D.drawDetections(), FirstPython.FirstPython.drawDetections(), FirstVision.drawDetections(), FirstVision.estimatePose(), PyCoralDetect.PyCoralDetect.get_objects(), utils.dataloaders.base_dataloader._VideoStream.getFrameSize(), RoadFinder.getLineFitness(), DarknetSaliency.getSalROI(), TensorFlowSaliency.getSalROI(), lpd_yunet.LPD_YuNet.infer(), PyDetectionDNN.PyDetectionDNN.postprocess(), DetectionDNN.postprocess(), download_data.Downloader.printRequest(), PyPreBlob.PyPreBlob.process(), PyNetOpenCV.PyNetOpenCV.process(), DenseSift.process(), DemoSaliency.process(), BurnTest.process(), RoadFinder.process(), ObjectTracker.process(), ArUcoBlob.process(), PyPostDepth.PyPostDepth.report(), PyPostURetinex.PyPostURetinex.report(), and visualize().
dictionary demo.model |
dictionary demo.models |
demo.net = YoutuReID(modelPath=args.model, backendId=args.backend, targetId=args.target) |
demo.parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).') |
demo.points |
Definition at line 68 of file demo.py.
Referenced by RoadFinder.combine(), RoadFinder.findLine2(), RoadFinder.fitLine(), RoadFinder.getLineFitness(), RoadFinder.getPixels(), RoadFinder.getPixelsQuick(), RoadFinder.trackVanishingLines(), and RoadFinder.updateLine().
demo.recognizer = SFace(modelPath=args.model, disType=args.dis_type, backendId=args.backend, targetId=args.target) |
demo.res |
Definition at line 68 of file demo.py.
Referenced by SuperPixel.process(), ObjectRecognitionMNIST.train(), ObjectRecognitionILAB.train(), and ObjectRecognitionCIFAR.train().
Definition at line 74 of file demo.py.
Referenced by ARtoolkit.detectInternal(), env_chan_color(), env_chan_intensity(), env_chan_orientation(), env_chan_steerable(), env_dec_x(), env_dec_xy(), env_dec_y(), env_lowpass_5_x_dec_x(), env_lowpass_5_y_dec_y(), env_motion_channel_input_and_consume_pyr(), env_rescale(), ObjectRecognitionMNIST.train(), ObjectRecognitionILAB.train(), and ObjectRecognitionCIFAR.train().
demo.results = model.infer(image) |
Definition at line 89 of file demo.py.
Referenced by TensorFlow.predict(), Darknet.predict(), and QRcode.process().
demo.roi = cv.selectROI('DaSiamRPN Demo', first_frame_copy) |
Definition at line 75 of file demo.py.
Referenced by SaliencySURF.process().
demo.score |
Definition at line 90 of file demo.py.
Referenced by RoadFinder.computeVanishingLines(), RoadFinder.findLine2(), RoadFinder.getLineFitness(), Darknet.predict(), RoadFinder.trackVanishingLines(), and RoadFinder.updateLine().
demo.str |
Definition at line 35 of file demo.py.
Referenced by FirstVision.detect(), PyPoseDetector.draw_landmarks(), ObjectDetect.parseSerial(), SaveVideo.parseSerial(), ArUcoBlob.process(), and benchmark.Benchmark.run().
def demo.str2bool |
Definition at line 43 of file demo.py.
Referenced by str2bool().
list demo.targets = [cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16] |
demo.topk_indices = net.query(query_img_list, gallery_img_list, args.topk) |
Definition at line 85 of file demo.py.
Referenced by QRcode.drawDetections(), FirstVision.drawDetections(), drawMap(), env_c_lowpass_5_x_dec_x_fewbits_optim(), env_c_lowpass_5_y_dec_y_fewbits_optim(), env_c_lowpass_9_x_fewbits_optim(), env_c_lowpass_9_y_fewbits_optim(), env_lowpass_5_x_dec_x(), env_lowpass_5_y_dec_y(), TensorFlow.getInDims(), Darknet.getInDims(), Yolo.getInDims(), RoadFinder.getPixels(), RoadFinder.getPixelsQuick(), DarknetSaliency.getSalROI(), TensorFlowSaliency.getSalROI(), FirstVision.learnHSV(), FirstVision.loadCameraCalibration(), ARtoolkit.manualinit(), Yolo.predict(), Darknet.predict(), SuperPixelSeg.process(), OpticalFlow.process(), DiceCounter.process(), DenseSift.process(), DemoBackgroundSubtract.process(), SaliencyGist.process(), SalientRegions.process(), DemoSaliency.process(), DemoQRcode.process(), DemoCPUGPU.process(), DemoIMU.process(), BurnTest.process(), RoadNavigation.process(), DemoARtoolkit.process(), MarkersCombo.process(), DNN.process(), ColorFiltering.process(), DemoNeon.process(), DarknetYOLO.process(), DarknetSingle.process(), SaliencySURF.process(), DemoDMP.process(), DemoArUco.process(), ObjectTracker.process(), SurpriseRecorder.process(), TensorFlowEasy.process(), TensorFlowSingle.process(), DarknetSaliency.process(), TensorFlowSaliency.process(), ObjectDetect.process(), SaveVideo.process(), ArUcoBlob.process(), DetectionDNN.process(), FirstVision.process(), Darknet.resizeInDims(), Yolo.resizeInDims(), FirstVision.sendAllSerial(), ArUcoBlob.sendBlobs(), and ArUco.sendSerial().