1.Introduction:
This project uses the Unihiker M10 to build an AI gesture-recognition demo. It recognizes rock, paper, and scissors gestures through the camera and lights up the LED corresponding to each gesture, demonstrating AI image-recognition technology.
2.Operation Instructions:
Power the Unihiker M10 with a data cable. Make the "rock, paper, scissors" gestures in the area below the camera. The Unihiker runs the model for gesture recognition, displays the result at the bottom of the screen, and lights the LED corresponding to the gesture, following the mapping sketched below. If no gesture is detected, all gesture LEDs are turned off.
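For reference, the gesture-to-LED mapping can be summarized as a small lookup (a minimal sketch; the pin assignments P22/P21/P24 match the full program in section 5):

def leds_for(label):
    # Digital levels for the (Scissors, Rock, Paper) LEDs on pins P22, P21, P24
    levels = {
        "Scissors": (1, 0, 0),
        "Rock":     (0, 1, 0),
        "Paper":    (0, 0, 1),
    }
    return levels.get(label, (0, 0, 0))  # "none" or unknown: all LEDs off

print(leds_for("Rock"))  # (0, 1, 0)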
3.Material Preparation:
4.Production Steps:
Tip: Before starting assembly, print the 3D-printed parts in advance. The part files are attached at the end of the article.
5.Code:
# -*- coding: UTF-8 -*-
# MindPlus
# Python
import cv2
from unihiker import GUI
from pinpong.board import Board, Pin
from pinpong.extension.unihiker import *
from XEdu.hub import Workflow as wf

def format_valve_output(task):
    # Format the raw inference result; with lang="zh" the returned dict
    # uses Chinese keys, e.g. "预测结果" ("prediction result")
    try:
        return task.format_output(lang="zh")
    except AttributeError:
        return "AttributeError: please check that the input data is correct"

u_gui = GUI()
para_task1 = {}
Board().begin()
# One LED per gesture: P22 = Scissors, P21 = Rock, P24 = Paper
p_p22_out = Pin(Pin.P22, Pin.OUT)
p_p21_out = Pin(Pin.P21, Pin.OUT)
p_p24_out = Pin(Pin.P24, Pin.OUT)

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep only the newest frame to reduce lag
cv2.namedWindow('winname', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('winname', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.resizeWindow('winname', 240, 280)
cv2.moveWindow('winname', 0, 0)

txt = u_gui.draw_text(text="Loading model...", x=0, y=285, font_size=20, color="#0000FF")
init_para_task1 = {"task": "mmedu", "checkpoint": "basenn.onnx"}
task1 = wf(**init_para_task1)
txt.config(text="Successful!")

while True:
    ret, frame = cap.read()
    if ret:
        para_task1["data"] = frame
        # MindPlus-generated guard: re-create the model if it is missing
        if 'task1' in globals() or 'task1' in locals():
            rea_result_task1 = task1.inference(**para_task1)
        else:
            print("init", 'task1')
            task1 = wf(**init_para_task1)
            rea_result_task1 = task1.inference(**para_task1)
        output_result_task1 = format_valve_output(task1)
        index = output_result_task1["预测结果"]  # the predicted class label
        key = cv2.waitKey(1)  # keeps the OpenCV window responsive
        if index == "Scissors":
            print("Scissors")
            txt.config(text="Scissors")
            p_p22_out.write_digital(1)
            p_p21_out.write_digital(0)
            p_p24_out.write_digital(0)
        elif index == "Rock":
            print("Rock")
            txt.config(text="Rock")
            p_p22_out.write_digital(0)
            p_p21_out.write_digital(1)
            p_p24_out.write_digital(0)
        elif index == "Paper":
            print("Paper")
            txt.config(text="Paper")
            p_p22_out.write_digital(0)
            p_p21_out.write_digital(0)
            p_p24_out.write_digital(1)
        elif index == "none":
            print("none")
            txt.config(text="")
            p_p22_out.write_digital(0)
            p_p21_out.write_digital(0)
            p_p24_out.write_digital(0)
        # Fit the frame to the Unihiker screen (240x320, portrait)
        frame = cv2.resize(frame, (220, 240))
        frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        cv2.imshow('winname', frame)
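If the LEDs do not respond as expected, it helps to test the model without the camera first. A minimal sketch, assuming a test photo named test.jpg saved next to basenn.onnx (the file name is only an example):

# -*- coding: UTF-8 -*-
from XEdu.hub import Workflow as wf

model = wf(task="mmedu", checkpoint="basenn.onnx")  # same model as the main program
result = model.inference(data="test.jpg")           # infer on a saved photo
output = model.format_output(lang="zh")             # dict with Chinese keys
print(output["预测结果"])                            # prints the predicted label

If this prints the expected label, the model and its dependencies work, and any remaining problem is in the camera or wiring.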
6.Q&A:
(1)Q: No module named 'XEdu'
A: Install the missing package: pip install xedu-python
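To confirm the package installed correctly, run a quick import check (a minimal sketch):

# Run after pip install xedu-python; an error here means the install failed
from XEdu.hub import Workflow
print("XEdu import OK")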
(2)Q: Segmentation fault
A: Pin the ONNX versions: pip install onnx==1.13.0 and pip install onnxruntime==1.13.1
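You can confirm the active versions with a quick check (a minimal sketch; the expected output is shown in the comments):

import onnx
import onnxruntime
print(onnx.__version__)         # expected: 1.13.0
print(onnxruntime.__version__)  # expected: 1.13.1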
7.Attachment: