Iris Tracking - Testing Raspberry Pi Capacitive TFT Screen


Background 

 

I got this Raspberry Pi 5” TFT capacitive screen to test a while ago, but I was busy the whole time and only recently found some free time. The other day I was browsing the MediaPipe site and came across the “MediaPipe Iris” solution. Although the Python version of MediaPipe does not support iris tracking, that didn't bother me. I decided to give it a try!


Design

 

The MediaPipe Face Mesh model estimates 468 3D facial landmarks in real time, covering the overall surface geometry of a human face.
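To get a feel for the model, here is a minimal sketch (not part of the original project) that runs Face Mesh on a single image and converts one of its normalized landmarks to pixel coordinates; the file name is just a placeholder.

CODE
import cv2
import mediapipe as mp

mp_face_mesh = mp.solutions.face_mesh

frame = cv2.imread("face.jpg")  # placeholder test image

with mp_face_mesh.FaceMesh(static_image_mode=True,
                           max_num_faces=1,
                           min_detection_confidence=0.5) as face_mesh:
    # The face mesh expects RGB input, while OpenCV loads BGR.
    results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

if results.multi_face_landmarks:
    h, w, _ = frame.shape
    landmarks = results.multi_face_landmarks[0].landmark  # 468 entries
    # Each landmark is normalized to [0, 1]; scale by the image size.
    print(len(landmarks), int(landmarks[0].x * w), int(landmarks[0].y * h))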


From these landmarks we can easily locate the eyes and crop out that area.


Iris detection and tracking can then be implemented with cv2.findContours in OpenCV, as sketched below.
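As a preview of what detect_iris in Part 3 does, here is a minimal sketch on a single eye crop (the input file name is a placeholder; in the real program the crop comes from the eye landmarks):

CODE
import cv2

# Placeholder input: a small BGR crop around one eye. In the full program
# this crop is taken from the bounding box of the eye landmarks.
eye_roi = cv2.imread("eye_crop.png")

gray = cv2.cvtColor(eye_roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)                  # smooth out noise
# Inverse binarization turns the dark iris/pupil into a white blob.
_, mask = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY_INV)

contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)

if contours:
    # Take the largest dark blob as the iris and circle its centre.
    x, y, w, h = cv2.boundingRect(contours[0])
    cv2.circle(eye_roi, (x + w // 2, y + h // 2), h // 2, (0, 0, 255), 2)
    cv2.imshow("iris", eye_roi)
    cv2.waitKey(0)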

HARDWARE LIST
1 Raspberry Pi 5” TFT capacitive screen
1 Raspberry Pi 4B
1 Camera

Programming 

 

Part 1. Load the MediaPipe and OpenCV Libraries

CODE
import cv2
import mediapipe as mp
import numpy as np
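If you are not sure the libraries are installed correctly on the Pi, a quick sanity check (not in the original write-up) is to print their versions:

CODE
import cv2
import mediapipe as mp
import numpy as np

# Print the installed versions to confirm the imports work on the Pi.
print("OpenCV:", cv2.__version__)
print("MediaPipe:", mp.__version__)
print("NumPy:", np.__version__)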

Part 2. Coding Face Landmark Detection

CODE
# Import visual functions and styles
mp_drawing = mp.solutions.drawing_utils
#mp_drawing_styles = mp.solutions.drawing_styles

# Import the 3D face landmark detection model
mp_face_mesh = mp.solutions.face_mesh

drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

cap = cv2.VideoCapture(0)
with mp_face_mesh.FaceMesh(
    max_num_faces=4,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5) as face_mesh:
  while cap.isOpened():
    success, frame = cap.read()
    if not success:
      print("Ignoring empty camera frame.")
      # The webcam can deliver an occasional empty frame; skip it.
      # If loading a video file, use 'break' instead of 'continue'.
      continue

    h, w, c = frame.shape
    # image = cv2.resize(frame, (w // 2, h // 2))
    # frame = cv2.flip(frame, 1)
    image = np.copy(frame)
    h2, w2, c2 = image.shape

    # To improve performance, optionally mark the image as not writeable
    # to pass by reference. MediaPipe expects RGB input, so convert first.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = face_mesh.process(image)

    # Draw the face mesh annotations on the image.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    left_eyes = []
    right_eyes = []
    if results.multi_face_landmarks:
      for face_landmarks in results.multi_face_landmarks:
        for idx, landmark in enumerate(face_landmarks.landmark):
          # Six landmarks around each eye, scaled from normalized
          # coordinates to pixel coordinates.
          if idx == 246 or idx == 159 or idx == 158 or idx == 145 or idx == 153 or idx == 190:  # left eye
          #if idx == 53 or idx == 111:  # left eye
            x1 = int(landmark.x * w2)
            y1 = int(landmark.y * h2)
            left_eyes.append((x1, y1))
            #cv2.circle(image, (x1, y1), 4, (255, 0, 255), 4, cv2.LINE_AA)
          if idx == 374 or idx == 380 or idx == 385 or idx == 386 or idx == 390 or idx == 414:  # right eye
          #if idx == 276 or idx == 340:  # right eye
            x1 = int(landmark.x * w2)
            y1 = int(landmark.y * h2)
            right_eyes.append((x1, y1))
            #cv2.circle(image, (x1, y1), 4, (0, 255, 255), 4, cv2.LINE_AA)
        if len(right_eyes) + len(left_eyes) == 12:
          # Bounding rectangle: wrap each set of eye landmarks with the
          # smallest upright rectangle.
          right_box = cv2.boundingRect(np.asarray(right_eyes))
          left_box = cv2.boundingRect(np.asarray(left_eyes))
          detect_iris(image, right_box, left_box)
    cv2.imshow('MediaPipe Face Mesh', image)
    if cv2.waitKey(5) & 0xFF == 27:  # press Esc to save a screenshot and quit
      cv2.imwrite("D:/iris_detect_result.png", image)
      break
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()

Part 3. Coding Iris Detection

CODE
def detect_iris(image, right_box, left_box):
  # Crop each eye region out of the full frame using the (x, y, w, h)
  # bounding boxes computed from the face mesh landmarks.
  left_roi = image[left_box[1]:left_box[1] + left_box[3], left_box[0]:left_box[0] + left_box[2]]
  cv2.imshow('left_eye', left_roi)
  lh, lw, lc = left_roi.shape
  right_roi = image[right_box[1]:right_box[1] + right_box[3], right_box[0]:right_box[0] + right_box[2]]
  cv2.imshow('right_eye', right_roi)
  rh, rw, rc = right_roi.shape
  if rh > 0 and lh > 0:
    rows, cols, _ = right_roi.shape  # keep the ROI size for reference
    gray_roi = cv2.cvtColor(right_roi, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    gray_roi = cv2.GaussianBlur(gray_roi, (7, 7), 0)  # Gaussian blur to reduce noise
    _, threshold = cv2.threshold(gray_roi, 30, 255, cv2.THRESH_BINARY_INV)  # binarize; adjust the threshold as needed
    contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find the dark blobs
    contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)

    for cnt in contours:
        # The largest dark blob is taken as the iris; circle its centre.
        (x, y, w, h) = cv2.boundingRect(cnt)
        cv2.circle(right_roi, (x + int(w / 2), y + int(h / 2)), int(h / 2), (0, 0, 255), 3)
        break

    rows, cols, _ = left_roi.shape  # keep the ROI size for reference
    gray_roi = cv2.cvtColor(left_roi, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    gray_roi = cv2.GaussianBlur(gray_roi, (7, 7), 0)  # Gaussian blur to reduce noise
    _, threshold = cv2.threshold(gray_roi, 30, 255, cv2.THRESH_BINARY_INV)  # binarize; adjust the threshold as needed
    contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find the dark blobs
    contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)

    for cnt in contours:
        (x, y, w, h) = cv2.boundingRect(cnt)
        cv2.circle(left_roi, (x + int(w / 2), y + int(h / 2)), int(h / 2), (0, 0, 255), 3)
        break
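The binarization threshold is hard-coded to 30 and may need adjusting for your lighting. One optional way to tune it live (a sketch, not part of the original code) is to attach an OpenCV trackbar to the eye preview window:

CODE
import cv2

def nothing(_):
    pass

# Create the window that detect_iris() already shows and attach a trackbar,
# so the threshold can be adjusted while the program is running.
cv2.namedWindow('right_eye')
cv2.createTrackbar('threshold', 'right_eye', 30, 255, nothing)

# Inside detect_iris() the hard-coded value could then be read back with:
#   t = cv2.getTrackbarPos('threshold', 'right_eye')
#   _, threshold = cv2.threshold(gray_roi, t, 255, cv2.THRESH_BINARY_INV)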

Connect all the parts together (detect_iris from Part 3 must be defined before the main loop in Part 2 calls it) and test the program.


This article was first published on dfrobot.com.cn on 2022/05/02.

URL: https://mc.dfrobot.com.cn/thread-313093-1-1.html

Author: 云天

License: All Rights Reserved