#!/usr/bin/env python
import device_patches       # Device specific patches for Jetson Nano (needs to be before importing cv2)

import cv2
import os
import sys, getopt
import signal
import time
from edge_impulse_linux.image import ImageImpulseRunner

# Global handle to the model runner so the SIGINT handler can stop it cleanly.
runner = None

# if you don't want to see a camera preview, set this to False
show_camera = True
# On headless Linux (no X display) a preview window cannot be shown.
if (sys.platform == 'linux' and not os.environ.get('DISPLAY')):
    show_camera = False
def now():
    """Return the current wall-clock time in whole milliseconds since the epoch."""
    return round(time.time() * 1000)
def get_webcams():
    """Probe camera ports 0-4 and return the list of port IDs with a working camera.

    A port counts as working when OpenCV can open it AND grab one frame;
    each probed camera is released before moving on.
    """
    port_ids = []
    for port in range(5):
        print("Looking for a camera in port %s:" %port)
        camera = cv2.VideoCapture(port)
        if camera.isOpened():
            # read()[0] is the success flag; some devices open but deliver no frames.
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)   # CAP_PROP_FRAME_WIDTH
                h = camera.get(4)   # CAP_PROP_FRAME_HEIGHT
                print("Camera %s (%s x %s) found in port %s " %(backendName,h,w, port))
                port_ids.append(port)
            camera.release()
    return port_ids
def sigint_handler(sig, frame):
    """SIGINT (Ctrl-C) handler: stop the model runner, if any, then exit cleanly."""
    print('Interrupted')
    if (runner):
        runner.stop()
    sys.exit(0)
# Install the Ctrl-C handler so the runner is stopped before the process exits.
signal.signal(signal.SIGINT, sigint_handler)
def help():
    """Print command-line usage for this script."""
    # NOTE: intentionally shadows the builtin `help`; kept for interface compatibility.
    print('python classify.py <path_to_model.eim> <Camera port ID, only required when more than 1 camera is present>')
def main(argv):
    """Run continuous image classification from a webcam using an Edge Impulse model.

    argv: command-line arguments (without the program name).
          argv[0] is the path to the .eim model file, relative to this script;
          argv[1] (optional) is the camera port ID, required when more than
          one camera is present.

    Exits with status 2 on bad usage; raises on camera/model failures.
    """
    try:
        opts, args = getopt.getopt(argv, "h", ["--help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) == 0:
        help()
        sys.exit(2)

    model = args[0]

    # Resolve the model path relative to this script's directory.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    # NOTE: this local `runner` shadows the module-level global of the same name.
    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']
            if len(args) >= 2:
                videoCaptureDeviceId = int(args[1])
            else:
                port_ids = get_webcams()
                if len(port_ids) == 0:
                    raise Exception('Cannot find any webcams')
                if len(args) <= 1 and len(port_ids) > 1:
                    raise Exception("Multiple cameras found. Add the camera port ID as a second argument to use to this script")
                videoCaptureDeviceId = int(port_ids[0])

            # Sanity-check the selected camera before handing it to the classifier.
            camera = cv2.VideoCapture(videoCaptureDeviceId)
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)   # CAP_PROP_FRAME_WIDTH
                h = camera.get(4)   # CAP_PROP_FRAME_HEIGHT
                print("Camera %s (%s x %s) in port %s selected." %(backendName,h,w, videoCaptureDeviceId))
                camera.release()
            else:
                raise Exception("Couldn't initialize selected camera.")

            next_frame = 0 # limit to ~10 fps here

            for res, img in runner.classifier(videoCaptureDeviceId):
                # Throttle: sleep until the next scheduled frame time.
                if (next_frame > now()):
                    time.sleep((next_frame - now()) / 1000)

                # print('classification runner response', res)

                if "classification" in res["result"].keys():
                    print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                    for label in labels:
                        score = res['result']['classification'][label]
                        print('%s: %.2f\t' % (label, score), end='')
                    print('', flush=True)
                elif "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                    for bb in res["result"]["bounding_boxes"]:
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        # Draw the detection rectangle in red (BGR order after conversion below).
                        img = cv2.rectangle(img, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (0, 0, 255), 2)

                if (show_camera):
                    # Runner frames are RGB; OpenCV display expects BGR.
                    cv2.imshow('edgeimpulse', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(1) == ord('q'):
                        break

                next_frame = now() + 100
        finally:
            if (runner):
                runner.stop()
if __name__ == "__main__":
    # Pass everything after the program name to main().
    main(sys.argv[1:])