JeVoisBase  1.22
JeVois Smart Embedded Machine Vision Toolkit Base Modules
PyPostYOLOv8seg.py
import pyjevois
if pyjevois.pro: import libjevoispro as jevois
else: import libjevois as jevois

import numpy as np
import cv2

## Python DNN post-processor for YOLOv8-Seg
#
# Adapted from https://github.com/ultralytics/ultralytics/blob/main/examples/YOLOv8-Segmentation-ONNXRuntime-Python/main.py
#
# This network produces two outputs:
# - 1x116x8400: standard YOLOv8 output (detected boxes and class confidences)
# - 1x32x160x160: segmentation masks
#
# Here, we combine them to produce a final display.
#
# @author Laurent Itti
#
# @email itti\@usc.edu
# @address University of Southern California, HNB-07A, 3641 Watt Way, Los Angeles, CA 90089-2520, USA
# @copyright Copyright (C) 2024 by Laurent Itti, iLab and the University of Southern California
# @mainurl http://jevois.org
# @supporturl http://jevois.org/doc
# @otherurl http://iLab.usc.edu
# @license GPL v3
# @distribution Unrestricted
# @restrictions None
# @ingroup pydnn
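#
# As a rough, illustrative sketch (assuming the standard 80-class COCO model; other variants may use
# different class counts), the 116 channels of the first output split into 4 box coordinates, 80 class
# scores, and 32 mask coefficients that are combined with the 32 prototype masks of the second output:
#
# @code
# import numpy as np
# dets = np.zeros((1, 116, 8400))       # output 1: per-anchor predictions (8400 anchors for a 640x640 blob)
# protos = np.zeros((1, 32, 160, 160))  # output 2: mask prototypes
# boxes_cxcywh = dets[:, 0:4, :]        # box center x, center y, width, height, in blob coordinates
# class_scores = dets[:, 4:84, :]       # one confidence score per class (80 for COCO)
# mask_coeffs = dets[:, 84:116, :]      # per-box weights applied to the 32 prototypes (see process_mask())
# @endcode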
class PyPostYOLOv8seg:
    # ###################################################################################################
    ## Constructor
    def __init__(self):
        # results of process(), held here for use by report():
        self.boxes = []
        self.segments = []
        self.masks = []

        # map from class index to class name:
        self.classmap = None
    # ###################################################################################################
    ## JeVois parameters initialization. These can be set by users in the GUI or JeVois model zoo file
    def init(self):
        pc = jevois.ParameterCategory("DNN Post-Processing Options", "")

        self.classes = jevois.Parameter(self, 'classes', 'str',
                        "Path to text file with names of object classes",
                        '', pc)
        self.classes.setCallback(self.loadClasses)

        self.conf = jevois.Parameter(self, 'conf', 'float',
                        "Confidence threshold",
                        0.4, pc)

        self.iou = jevois.Parameter(self, 'iou', 'float',
                        "Intersection-over-union (IOU) threshold",
                        0.45, pc)

        self.smoothing = jevois.Parameter(self, 'smoothing', 'float',
                        "Amount of smoothing applied to contours, higher is smoother",
                        0.5, pc)

        self.fillmask = jevois.Parameter(self, 'fillmask', 'bool',
                        "Whether to draw semi-transparent filled masks (requires approximating " +
                        "shapes by their convex hull)",
                        False, pc)

        self.drawboxes = jevois.Parameter(self, 'drawboxes', 'bool',
                        "Whether to draw boxes and text labels around detected objects",
                        True, pc)
    # ###################################################################################################
    ## Freeze some parameters that should not be changed at runtime
    def freeze(self, doit):
        self.classes.freeze(doit)

    # ###################################################################################################
    ## Parameter callback: Load class names when 'classes' parameter value is changed by model zoo
    def loadClasses(self, filename):
        if filename:
            jevois.LINFO(f"Loading {filename}...")
            f = open(pyjevois.share + '/' + filename, 'rt')  # will throw if file not found
            self.classmap = f.read().rstrip('\n').split('\n')
    # ###################################################################################################
    ## Get network outputs
    # outs is a list of numpy arrays for the network's outputs.
    # preproc is a handle to the pre-processor that was used, useful to recover transforms from original image
    # to cropped/resized network inputs.
    def process(self, outs, preproc):
        if (len(outs) != 2): jevois.LFATAL("Need 2 outputs: boxes, masks")
        self.boxes, self.segments, self.masks = self.postprocess(outs, preproc, preproc.blobsize(0))
    # ###################################################################################################
    ## Helper to get class name and confidence as a clean string, and a color that varies with class name
    def getLabel(self, id, score):
        if self.classmap is None or id < 0 or id >= len(self.classmap): categ = 'unknown'
        else: categ = self.classmap[id]

        color = jevois.stringToRGBA(categ, 255)

        return ("%s: %.2f" % (categ, score * 100.0), color & 0xffffffff)
    # ###################################################################################################
    ## Report the latest results obtained by process() by drawing them
    # outimg is None or a RawImage to draw into when in Legacy mode (drawing to an image sent to USB)
    # helper is None or a GUIhelper to do OpenGL drawings when in JeVois-Pro GUI mode
    # overlay is True if user wishes to see overlay text
    # idle is True if keyboard/mouse have been idle for a while, which typically would reduce what is displayed
    #
    # Note that report() is called on every frame even though the network may run slower or take some time to load and
    # initialize, thus you should be prepared for report() being called even before process() has ever been called
    # (i.e., create some class member variables to hold the reported results, initialize them to some defaults in your
    # constructor, report their current values here, and update their values in process()).
    def report(self, outimg, helper, overlay, idle):

        # Legacy JeVois mode: Write results into YUYV RawImage to send over USB:
        if outimg is not None:
            jevois.LFATAL("Sorry, legacy mode not supported by PyPostYOLOv8seg.py")

        # JeVois-Pro mode: Write the results as OpenGL overlay boxes and text on top of the video:
        if helper is not None and overlay:
            for i in range(len(self.segments)):
                x1, y1, x2, y2, score, cla = self.boxes[i]
                label, color = self.getLabel(int(cla), score)

                # Draw shape outline, and possibly filled mask using OpenGL, which requires convex contours:
                approx = cv2.approxPolyDP(self.segments[i], self.smoothing.get(), True)
                if self.fillmask.get():
                    convexhull = cv2.convexHull(approx)
                    helper.drawPoly(convexhull, color, True)
                else:
                    helper.drawPoly(approx, color, False)

                # Draw box, unfilled:
                if self.drawboxes.get():
                    helper.drawRect(x1, y1, x2, y2, color & 0xffffffff, False)
                    helper.drawText(x1 + 3, y1 + 3, label, color & 0xffffffff)
    # ###################################################################################################
    def postprocess(self, preds, preproc, blobsize, nm = 32):
        x, protos = preds[0], preds[1]  # Two outputs: predictions and protos

        # Transpose the first output: (Batch_size, xywh_conf_cls_nm, Num_anchors) ->
        # (Batch_size, Num_anchors, xywh_conf_cls_nm)
        x = np.einsum("bcn->bnc", x)

        # Filter predictions by confidence threshold:
        x = x[np.amax(x[..., 4:-nm], axis=-1) > self.conf.get()]

        # Create a new matrix which merges (box, score, cls, nm) into one.
        # For more details about `numpy.c_()`: https://numpy.org/doc/1.26/reference/generated/numpy.c_.html
        x = np.c_[x[..., :4], np.amax(x[..., 4:-nm], axis=-1), np.argmax(x[..., 4:-nm], axis=-1), x[..., -nm:]]

        # NMS filtering:
        x = x[cv2.dnn.NMSBoxes(x[:, :4], x[:, 4], self.conf.get(), self.iou.get())]

        # Decode and return:
        if len(x) > 0:
            # Bounding boxes format change: cxcywh -> xyxy
            x[..., [0, 1]] -= x[..., [2, 3]] / 2
            x[..., [2, 3]] += x[..., [0, 1]]

            # Bounding boxes boundary clamp:
            x[..., [0, 2]] = x[:, [0, 2]].clip(0, blobsize[1])
            x[..., [1, 3]] = x[:, [1, 3]].clip(0, blobsize[0])

            # Process masks:
            masks = self.process_mask(protos[0], x[:, 6:], x[:, :4], blobsize)

            # Masks -> Segments (contours):
            segments = self.masks2segments(masks)

            # Scale all boxes from input blob to input image:
            for i in range(x.shape[0]):
                x[i, [0, 1]] = preproc.b2i(x[i, 0], x[i, 1], 0)
                x[i, [2, 3]] = preproc.b2i(x[i, 2], x[i, 3], 0)

            # Scale all segments from input blob to input image:
            for s in range(len(segments)):
                for i in range(segments[s].shape[0]):
                    x1, y1 = segments[s][i, [0, 1]]
                    x1, y1 = preproc.b2i(float(x1), float(y1), 0)
                    segments[s][i, [0, 1]] = [int(x1), int(y1)]

            return x[..., :6], segments, masks  # boxes, segments, masks
        else:
            return [], [], []
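
    # Illustrative shape walk-through for postprocess() above (assuming the 80-class COCO model and a
    # 640x640 blob; the numbers are just for explanation): preds[0] is (1, 116, 8400), transposed to
    # (1, 8400, 116), then confidence-filtered to (N, 116). The np.c_ merge yields (N, 38) columns laid
    # out as [cx, cy, w, h, best_score, class_id, 32 mask coefficients], which is why the code uses
    # x[:, :4] for boxes, x[:, 4] for scores, x[:, 5] for class ids, and x[:, 6:] for mask coefficients;
    # after NMS and the cxcywh -> xyxy conversion, x[..., :6] holds [x1, y1, x2, y2, score, class_id]
    # per detection.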

    # ###################################################################################################
    @staticmethod
    def masks2segments(masks):
        """
        It takes a list of masks(n,h,w) and returns a list of segments(n,xy) (Borrowed from
        https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L750)

        Args:
            masks (numpy.ndarray): the output of the model, which is a tensor of shape (batch_size, 160, 160).

        Returns:
            segments (List): list of segment masks.
        """
        segments = []
        for x in masks.astype("uint8"):
            c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]  # or CHAIN_APPROX_NONE
            if c:
                c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
            else:
                c = np.zeros((0, 2))  # no segments found
            segments.append(c.astype("float32"))
        return segments

    # ###################################################################################################
    @staticmethod
    def crop_mask(masks, boxes):
        """
        It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box. (Borrowed from
        https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L599)

        Args:
            masks (Numpy.ndarray): [n, h, w] tensor of masks.
            boxes (Numpy.ndarray): [n, 4] tensor of bbox coordinates in relative point form.

        Returns:
            (Numpy.ndarray): The masks are being cropped to the bounding box.
        """
        n, h, w = masks.shape
        x1, y1, x2, y2 = np.split(boxes[:, :, None], 4, 1)
        r = np.arange(w, dtype=x1.dtype)[None, None, :]
        c = np.arange(h, dtype=x1.dtype)[None, :, None]
        return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
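
    # Broadcasting sketch for crop_mask() above (illustrative only): with n masks on an h x w grid,
    # x1/y1/x2/y2 each have shape (n, 1, 1), r (column indices) has shape (1, 1, w) and c (row indices)
    # has shape (1, h, 1), so the four comparisons broadcast to a boolean (n, h, w) array that zeroes
    # out every mask pixel falling outside its own bounding box.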

    # ###################################################################################################
    def process_mask(self, protos, masks_in, bboxes, im0_shape):
        """
        Takes the output of the mask head, and applies the mask to the bounding boxes.
        This produces masks of higher quality but is slower.
        (Borrowed from https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/
        ultralytics/utils/ops.py#L618)

        Args:
            protos (numpy.ndarray): [mask_dim, mask_h, mask_w].
            masks_in (numpy.ndarray): [n, mask_dim], n is number of masks after nms.
            bboxes (numpy.ndarray): bboxes re-scaled to original image shape.
            im0_shape (tuple): the size of the input image (h,w,c).

        Returns:
            (numpy.ndarray): The upsampled masks.
        """
        c, mh, mw = protos.shape
        masks = np.matmul(masks_in, protos.reshape((c, -1))).reshape((-1, mh, mw)).transpose(1, 2, 0)  # HWN
        masks = np.ascontiguousarray(masks)
        masks = self.scale_mask(masks, im0_shape)  # re-scale mask from P3 shape to original input image shape
        masks = np.einsum("HWN -> NHW", masks)  # HWN -> NHW
        masks = self.crop_mask(masks, bboxes)
        return np.greater(masks, 0.5)
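
    # Shape sketch for process_mask() above (assuming 32 prototypes on a 160x160 grid): masks_in is
    # (n, 32) and protos reshapes to (32, 25600), so the matmul gives (n, 25600), reshaped to
    # (n, 160, 160) and transposed to (160, 160, n) for scale_mask(); the result is transposed back to
    # (n, h, w), cropped to each box, and thresholded at 0.5 to produce boolean masks.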

    # ###################################################################################################
    @staticmethod
    def scale_mask(masks, im0_shape, ratio_pad=None):
        """
        Takes a mask, and resizes it to the original image size. (Borrowed from
        https://github.com/ultralytics/ultralytics/blob/465df3024f44fa97d4fad9986530d5a13cdabdca/ultralytics/utils/ops.py#L305)

        Args:
            masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3].
            im0_shape (tuple): the original image shape.
            ratio_pad (tuple): the ratio of the padding to the original image.

        Returns:
            masks (np.ndarray): The masks that are being returned.
        """
        im1_shape = masks.shape[:2]
        if ratio_pad is None:  # calculate from im0_shape
            gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
            pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
        else:
            pad = ratio_pad[1]

        # Calculate tlbr of mask:
        top, left = int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1))  # y, x
        bottom, right = int(round(im1_shape[0] - pad[1] + 0.1)), int(round(im1_shape[1] - pad[0] + 0.1))
        if len(masks.shape) < 2:
            raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
        masks = masks[top:bottom, left:right]
        masks = cv2.resize(
            masks, (im0_shape[1], im0_shape[0]), interpolation=cv2.INTER_LINEAR
        )  # INTER_CUBIC would be better
        if len(masks.shape) == 2:
            masks = masks[:, :, None]
        return masks
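
    # Numeric example for scale_mask() above (illustrative, assuming a square 640x640 blob): the 160x160
    # mask grid gives gain = min(160/640, 160/640) = 0.25 and pad = (0, 0), so nothing is cropped and the
    # masks are simply resized up to 640x640; with a letterboxed (padded) blob, the pad terms first strip
    # the padded border before resizing.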