70 """Draws the bounding box and label for each object."""
73 draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)], outline=
'red')
74 draw.text((bbox.xmin+10, bbox.ymin+10),
'%s: %.2f' % (labels.get(obj.id, obj.id), obj.score), fill=
'red')
79 frame = inframe.getCvRGB()
if self.
rgb else inframe.getCvBGR()
86 image = Image.fromarray(frame);
87 _, scale = common.set_resized_input(self.
interpreter, image.size,
88 lambda size: image.resize(size, Image.LANCZOS))
91 start = time.perf_counter()
93 inference_time = time.perf_counter() - start
104 image = image.convert(
'RGB')
108 frame = np.array(image)
118 cv.putText(frame,
'JeVois Coral Detection - ' + self.
model, (3, 15),
119 cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv.LINE_AA)
121 fps = self.
timer.stop()
122 label = fps +
', %dms' % (inference_time * 1000.0)
123 cv.putText(frame, label, (3, h-5), cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv.LINE_AA)
126 if self.
rgb: outframe.sendCvRGB(frame)
127 else: outframe.sendCv(frame)
133 idle, winw, winh = helper.startFrame()
136 x, y, w, h = helper.drawInputFrame(
"c", inframe,
False,
False)
139 frame = inframe.getCvRGBp()
if self.
rgb else inframe.getCvBGRp()
145 image = Image.fromarray(frame);
146 _, scale = common.set_resized_input(self.
interpreter, image.size,
147 lambda size: image.resize(size, Image.LANCZOS))
149 start = time.perf_counter()
151 inference_time = time.perf_counter() - start
164 label = self.
labels.get(obj.id, obj.id)
166 helper.drawRect(bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax, col,
True)
167 helper.drawText(bbox.xmin+2, bbox.ymin+1,
'%s: %.2f' % (label, obj.score), col)
177 fps = self.
timer.stop()
178 helper.iinfo(inframe, fps, winw, winh);
179 helper.itext(
'JeVois-Pro Python Coral Detection - %s - %dms/inference' %
180 (self.
model, inference_time * 1000.0))
def get_objects(self, interpreter, score_threshold = -float('inf'), image_scale = (1.0, 1.0)):
    """Gets results from a detection model as a list of detected objects.

    Adapted from pycoral.adapters.detect.get_objects; supports both
    signature-based models and the two legacy output-tensor orderings.

    Args:
      interpreter: The ``tf.lite.Interpreter`` to query for results.
      score_threshold (float): The score threshold for results. All returned
        results have a score greater-than-or-equal-to this value.
      image_scale (float, float): Scaling factor to apply to the bounding boxes as
        (x-scale-factor, y-scale-factor), where each factor is from 0 to 1.0.

    Returns:
      A list of :obj:`Object` objects, which each contains the detected object's
      id, score, and bounding box as :obj:`BBox` (coordinates mapped back to the
      original image space and rounded to int).

    Raises:
      ValueError: If the model defines more than one signature.
    """
    # If the model has a signature, use the signature output tensor names to
    # parse the results; otherwise fall back to assumptions about the output
    # tensor order and size.
    # NOTE(review): _get_full_signature_list() is private TF-Lite API; it may
    # change between TensorFlow versions — confirm against the installed runtime.
    signature_list = interpreter._get_full_signature_list()
    if signature_list:
        if len(signature_list) > 1:
            raise ValueError('Only support model with one signature.')
        signature = signature_list[next(iter(signature_list))]
        count = int(interpreter.tensor(signature['outputs']['output_0'])()[0])
        scores = interpreter.tensor(signature['outputs']['output_1'])()[0]
        class_ids = interpreter.tensor(signature['outputs']['output_2'])()[0]
        boxes = interpreter.tensor(signature['outputs']['output_3'])()[0]
    elif common.output_tensor(interpreter, 3).size == 1:
        # Legacy ordering: boxes, class_ids, scores, count.
        boxes = common.output_tensor(interpreter, 0)[0]
        class_ids = common.output_tensor(interpreter, 1)[0]
        scores = common.output_tensor(interpreter, 2)[0]
        count = int(common.output_tensor(interpreter, 3)[0])
    else:
        # Alternate legacy ordering: scores, boxes, count, class_ids.
        scores = common.output_tensor(interpreter, 0)[0]
        boxes = common.output_tensor(interpreter, 1)[0]
        count = int(common.output_tensor(interpreter, 2)[0])
        class_ids = common.output_tensor(interpreter, 3)[0]

    # Scale factors mapping model-input coordinates back to the original image:
    # the model saw a (width, height) input that was a resized crop of the
    # original, so divide by the per-axis resize ratio.
    width, height = common.input_size(interpreter)
    image_scale_x, image_scale_y = image_scale
    sx, sy = width / image_scale_x, height / image_scale_y

    def make(i):
        # Boxes are emitted as (ymin, xmin, ymax, xmax) in model-input space.
        ymin, xmin, ymax, xmax = boxes[i]
        return detect.Object(
            id = int(class_ids[i]),
            score = float(scores[i]),
            bbox = detect.BBox(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax).scale(sx, sy).map(int))

    return [make(i) for i in range(count) if scores[i] >= score_threshold]