# (removed: non-code web-page artifact from the code-hosting site)
# Edge Impulse - OpenMV Object Detection Example
import sensor, image, time,pyb,omv,math,utime,tf,lcd,gc
from pyb import UART,Pin,Timer,Servo
from machine import SPI
from umotor import UMOTOR
from pid import PID
from ultrasonic import ULTRASONIC
from button import BUTTON
from pycommon import PYCOMMON
common=PYCOMMON()   # shared helper utilities
motor=UMOTOR()      # motor driver; uses pins B0, B1, B4, B5
button=BUTTON()     # the MengFei OpenMV board has a single button (D8), preselected inside BUTTON
count=0             # consecutive-recognition counter shared with gesture_recongnize()
lcd.init() # Initialize the lcd screen.
lcd.set_direction(2)  # screen orientation
################### Fixed-direction car motion control (forward/back/left/right) #####################
def run_forward():
    """Drive both wheels forward for 2 s, then stop."""
    motor.run(60, 60)
    # FIX: the original `time.sleep(2000)` intended 2 s (per its comment), but
    # MicroPython's time.sleep() takes SECONDS; sleep_ms() makes the 2000 ms
    # delay explicit and portable across firmware versions.
    time.sleep_ms(2000)
    motor.run(0, 0)
def run_back():
    """Drive both wheels backward for 2 s, then stop."""
    motor.run(-60, -60)
    # FIX: time.sleep(2000) would block 2000 s on builds where sleep() takes
    # seconds; sleep_ms(2000) is the intended 2 s on any MicroPython firmware.
    time.sleep_ms(2000)
    motor.run(0, 0)
def run_left():
    """Turn left ~90 degrees, then drive forward for 2 s and stop."""
    motor.run(60, -60)
    # FIX: explicit milliseconds (original time.sleep(500)/sleep(2000) read as
    # seconds on MicroPython, contradicting the "turn 90 degrees / 2 s" intent).
    time.sleep_ms(500)   # ~90-degree spin — duration is tuned empirically
    motor.run(60, 60)
    time.sleep_ms(2000)
    motor.run(0, 0)
def run_right():
    """Turn right ~90 degrees, then drive forward for 2 s and stop."""
    motor.run(-60, 60)
    # FIX: explicit milliseconds — see run_left(); same sleep-units defect.
    time.sleep_ms(500)   # ~90-degree spin — duration is tuned empirically
    motor.run(60, 60)
    time.sleep_ms(2000)
    motor.run(0, 0)
def run_round():
    """Spin in place for 2 s, then stop."""
    motor.run(-60, 60)
    # FIX: explicit milliseconds for the intended 2 s spin (sleep-units defect,
    # see run_forward()).
    time.sleep_ms(2000)
    motor.run(0, 0)
def gesture_recongnize(img):
    """Run one gesture-classification pass on *img* (grayscale frame).

    Binarizes the frame around its histogram threshold, finds the largest
    bright blob, and — only when a plausibly hand-sized blob is present —
    runs the built-in "gesture" TF model (globals ``net``/``labels``).
    A label string is returned only after 5 consecutive confident frames
    (streak tracked in the global ``count``); otherwise returns None.

    NOTE: name keeps the original (misspelled) identifier so callers work.
    """
    global count
    score = 0
    label = '\0'          # placeholder until a confident classification
    confidence = 90       # minimum score (percent) to count a hit
    largest_blob = None
    # FIX: initialize the overlay text — previously it was only assigned
    # inside the tf.classify() loop, so an empty result raised NameError at
    # draw time.
    score_str = "??:??%"
    # Auto-threshold: binarize using the histogram's suggested split point.
    hist = img.get_histogram()
    thread = hist.get_threshold()
    img.binary([(0, thread[0])])
    blobs = img.find_blobs([(50, 255)], pixels_threshold=100, area_threshold=100, merge=True)
    if blobs:
        # Keep only the largest blob (assumed to be the hand).
        largest_blob = max(blobs, key=lambda b: b.pixels())
        # Reject blobs that are too small or that fill the whole frame
        # (blob[2]/blob[3] are width/height of the bounding box).
        if (largest_blob[2] >= img.width() or largest_blob[2] < 100
                or largest_blob[3] < 100 or largest_blob[3] >= img.height()):
            return None
        # FIX: classification is nested inside the blob branch so that
        # largest_blob can never be None when drawn below.
        for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
            out = obj.output()
            max_idx = out.index(max(out))
            score = int(out[max_idx] * 100)
            if (score < confidence and count > 0):
                # A low-confidence frame breaks the consecutive-hit streak.
                score_str = "??:??%"
                count = 0
            else:
                label = labels[max_idx]
                score_str = "%s:%d%% " % (label, score)
                count = count + 1
        if count >= 5:  # confirmed after multiple consecutive recognitions
            count = 0   # reset the streak so the next gesture starts fresh
            lcd.display(img)
            print("gesture:", label)  # report the recognized gesture
            img.draw_rectangle(largest_blob.rect())
            img.draw_cross(largest_blob.cx(), largest_blob.cy())
            img.draw_string(0, 0, str(score_str), color=(255, 0, 0))
            return label
        else:
            return None
    return None  # no blob found this frame
def find_gesture_control(img):
    """Map a confirmed gesture in *img* to a car motion command.

    paper -> forward, rock -> backward, scissor -> spin; any other
    confirmed label stops the motors.  Does nothing while recognition is
    still unconfirmed (gesture_recongnize returned None).
    """
    label = gesture_recongnize(img)
    if label:  # only act once 5 consecutive recognitions agree
        lcd.display(img)
        # FIX: was a chain of independent `if`s with the `else` bound only to
        # the last test, so 'paper'/'rock' also fell through into the stop
        # branch; `elif` makes the dispatch exclusive.
        if label == 'paper':
            run_forward()
            print("go forward")
        elif label == 'rock':
            run_back()
            print("go back")
        elif label == 'scissor':
            run_round()
            print("go round")
        else:
            motor.run(0, 0)  # unknown label: make sure the car is stopped
# The gesture model is built into the firmware image (saves RAM); this
# requires a firmware build that includes gesture-recognition support.
try:
    # load the model, alloc the model file on the heap if we have at least 64K free after loading
    labels,net = tf.load_builtin_model("gesture")
    #labels,net = tf.load_builtin_model('yoloface')
except Exception as e:
    raise Exception('Failed to load "gesture", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')
print(net) # print network parameters: input/output shapes and memory footprint
############################## Camera initialization #####################
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Grayscale pixel format (single channel)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((160, 160)) # Crop to a 160x160 capture window.
sensor.set_hmirror(1) # Mirror the image horizontally.
clock = time.clock() # Tracks FPS (frame-rate timer).
# Find a face!
# Main loop: grab a frame, run gesture-based motion control, show the frame,
# and watch the button — holding it for more than 2 s exits the loop.
while (True):
    clock.tick()
    img = sensor.snapshot()
    find_gesture_control(img)
    lcd.display(img) # Take a picture and display the image.
    print(clock.fps(), "fps", end="\n\n")
    if button.state():
        click_timer=time.ticks() # start timing the press
        # NOTE(review): time.ticks() is the legacy OpenMV millisecond counter;
        # newer MicroPython uses time.ticks_ms()/time.ticks_diff() (the plain
        # subtraction here also ignores counter wraparound) — confirm firmware.
        while button.state(): pass # busy-wait until the button is released
        if time.ticks()-click_timer>2000: # held longer than 2 s
            break # exit the loop (return to the main screen)
        else :
            click_timer=time.ticks() # short press: just refresh the timestamp
# (removed: non-code moderation-notice text injected by the code-hosting page)