Face Recognition with Raspberry Pi and Dlib


I proposed this project six months ago, and the deadline is this month or next. So I put CS:GO aside and finished it in a hurry. Here are the design and the code. I hope it helps.

Process:

Hardware:

Raspberry Pi 4B (2 GB)

USB Camera

LCD 1602A

Arduino

Stepper motor or other mechanical structure to open the door

HC-SR04 ultrasonic ranging module

DuPont wires, screws, pliers…

(An IC module we designed earlier; not related to this project.)

Install the system on your Raspi

Download Raspbian from https://www.raspberrypi.org/downloads/raspbian/
Use SD Card Formatter (or a similar imaging tool) to write the image to an SD card
Connect the Raspi to your router with a cable and find its IP address
Install PuTTY on your PC and log in to the Raspi

sudo apt-get install xrdp

Now you can use Remote Desktop on Windows to operate the Raspi through a graphical interface.

Install some necessary libraries

import sys,os,dlib,glob,numpy,cv2,time
import RPi.GPIO as GPIO

That's all we need. Just use pip or miniconda to install them (dlib, opencv-python, and numpy; RPi.GPIO usually comes preinstalled on Raspbian), easy peasy.
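To confirm the installs worked, a quick sanity check is to import everything and print the version strings (a small sketch; the version attributes below are the standard ones for these packages):

import dlib, cv2, numpy
import RPi.GPIO as GPIO

# print versions so we know which builds are installed
print("dlib:", dlib.__version__)
print("OpenCV:", cv2.__version__)
print("numpy:", numpy.__version__)
print("RPi.GPIO:", GPIO.VERSION)

If any of the imports fail, go back and install the missing package before continuing.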

Check your USB cam

import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret==True:
        frame = cv2.flip(frame,0)

        # write the flipped frame
        out.write(frame)

        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()

Run it and you should see a window showing what the camera captures. If not, go back and check the previous steps.

Find faces in an image and generate a 128D vector for each face

https://github.com/AKSHAYUBHAT/TensorFace/blob/master/openface/models/dlib/shape_predictor_68_face_landmarks.dat

Get this model: dlib's frontal face detector finds how many faces an image contains and where they are, and this shape predictor then locates 68 landmarks on each detected face.

https://github.com/ageitgey/face_recognition_models/blob/master/face_recognition_models/models/dlib_face_recognition_resnet_model_v1.dat

Get this model: given a detected face and its landmarks, it returns a 128D vector that encodes the face.
This vector is how we describe faces in this program.
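Once two faces are reduced to 128D vectors, comparing them is just a Euclidean distance; dlib's documentation suggests that a distance below about 0.6 usually means the same person (this project uses stricter thresholds later). A minimal sketch, assuming face_descriptor_1 and face_descriptor_2 were produced by compute_face_descriptor as in the test code below:

import numpy

d1 = numpy.array(face_descriptor_1)   # 128D descriptor of the first face
d2 = numpy.array(face_descriptor_2)   # 128D descriptor of the second face

dist = numpy.linalg.norm(d1 - d2)     # Euclidean distance between the two descriptors
print("Same person" if dist < 0.6 else "Different person")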

Here’s test code.


# -*- coding: UTF-8 -*-
import sys, os, dlib, glob, numpy, cv2, time

predictor_path = '/root/Desktop/Test/Dlib_test/shape_predictor_68_face_landmarks.dat'
face_rec_model_path = '/root/Desktop/Test/Dlib_test/dlib_face_recognition_resnet_model_v1.dat'
faces_folder_path = '/root/Desktop/Test/Dlib_test/faces3'

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

descriptors = []   # one 128D descriptor per registered face
candidate = []     # file names of the registered faces

def ReloadList():
    for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
        print("Processing file: {}".format(f))
        candidate.append(f)
        img = cv2.imread(f)
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            shape = sp(img, d)
            face_descriptor = facerec.compute_face_descriptor(img, shape)
            # transfer to a numpy array
            v = numpy.array(face_descriptor)
            descriptors.append(v)
    numpy.save("a.npy", descriptors)

ReloadList()
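Since ReloadList() saves the descriptors to a.npy, later runs could skip re-processing the photos and just load the file back; the main program below keeps that load line commented out, probably because the candidate file-name list still has to be rebuilt anyway. A minimal sketch of the load:

import numpy

descriptors = numpy.load("a.npy")   # one 128D row per registered face
print("Loaded {} registered descriptors".format(len(descriptors)))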

Drive the LCD to display some information

Wiring:
VSS, GND
VDD, 5V
VO, potentiometer (contrast)
RS, GPIO14
RW, GND
EN, GPIO15
D4, GPIO17
D5, GPIO18
D6, GPIO27
D7, GPIO22
A, 5V
K, GND

import RPi.GPIO as GPIO
import time

# Note: these GPIO numbers differ from the wiring table above; make sure the
# pin numbers in the code match your actual wiring.
LCD_RS = 7
LCD_E  = 8
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 18

# SET GPIO PATTERN (IN / OUT)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)       # Use BCM GPIO numbers
GPIO.setup(LCD_E, GPIO.OUT)  # E
GPIO.setup(LCD_RS, GPIO.OUT) # RS
GPIO.setup(LCD_D4, GPIO.OUT) # DB4
GPIO.setup(LCD_D5, GPIO.OUT) # DB5
GPIO.setup(LCD_D6, GPIO.OUT) # DB6
GPIO.setup(LCD_D7, GPIO.OUT) # DB7
GPIO.setup(21, GPIO.OUT)     # pin 21 drives the door mechanism (used later)
GPIO.output(21, GPIO.LOW)

# Define some device constants
LCD_WIDTH = 16    # Maximum characters per line
LCD_CHR = True    # Mode: sending a character
LCD_CMD = False   # Mode: sending a command

LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line

# Timing constants (same values as in the HC-SR04 section)
E_PULSE = 0.0005
E_DELAY = 0.0005



def lcd_init():
  # Initialise display
  lcd_byte(0x33,LCD_CMD) # 110011 Initialise
  lcd_byte(0x32,LCD_CMD) # 110010 Initialise
  lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
  lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
  lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
  lcd_byte(0x01,LCD_CMD) # 000001 Clear display
  time.sleep(E_DELAY)

def lcd_byte(bits, mode):
  # Send byte to data pins
  # bits = data
  # mode = True  for character
  #        False for command

  GPIO.output(LCD_RS, mode) # RS

  # High bits
  GPIO.output(LCD_D4, False)
  GPIO.output(LCD_D5, False)
  GPIO.output(LCD_D6, False)
  GPIO.output(LCD_D7, False)
  if bits&0x10==0x10:
    GPIO.output(LCD_D4, True)
  if bits&0x20==0x20:
    GPIO.output(LCD_D5, True)
  if bits&0x40==0x40:
    GPIO.output(LCD_D6, True)
  if bits&0x80==0x80:
    GPIO.output(LCD_D7, True)

  # Toggle 'Enable' pin
  lcd_toggle_enable()

  # Low bits
  GPIO.output(LCD_D4, False)
  GPIO.output(LCD_D5, False)
  GPIO.output(LCD_D6, False)
  GPIO.output(LCD_D7, False)
  if bits&0x01==0x01:
    GPIO.output(LCD_D4, True)
  if bits&0x02==0x02:
    GPIO.output(LCD_D5, True)
  if bits&0x04==0x04:
    GPIO.output(LCD_D6, True)
  if bits&0x08==0x08:
    GPIO.output(LCD_D7, True)

  # Toggle 'Enable' pin
  lcd_toggle_enable()

def lcd_toggle_enable():
  # Toggle enable
  time.sleep(E_DELAY)
  GPIO.output(LCD_E, True)
  time.sleep(E_PULSE)
  GPIO.output(LCD_E, False)
  time.sleep(E_DELAY)

def lcd_string(message,line):
  # Send string to display

  message = message.ljust(LCD_WIDTH," ")

  lcd_byte(line, LCD_CMD)

  for i in range(LCD_WIDTH):
    lcd_byte(ord(message[i]),LCD_CHR)







lcd_init()
while True:
    lcd_string("Hello World", LCD_LINE_1)
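Instead of the endless Hello World loop, you can also write to both rows once and then release the pins; a small usage sketch using the same functions:

lcd_init()
lcd_string("Face Recognition", LCD_LINE_1)   # first row
lcd_string("Ready...", LCD_LINE_2)           # second row
time.sleep(5)
GPIO.cleanup()                               # release the GPIO pins when done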

Drive the HC-SR04 to detect whether something is in front of the cam

Wiring:
Trig, GPIO2
Echo, GPIO3
VCC, 5V
GND, GND

import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

GPIO_TRIGGER = 2
GPIO_ECHO = 3

GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)

def distance():
    # send a 10 microsecond pulse on the trigger pin
    GPIO.output(GPIO_TRIGGER, True)
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    start_time = time.time()
    stop_time = time.time()
    # wait for the echo pin to go high, then low again
    while GPIO.input(GPIO_ECHO) == 0:
        start_time = time.time()
    while GPIO.input(GPIO_ECHO) == 1:
        stop_time = time.time()
    time_elapsed = stop_time - start_time
    # sound travels at roughly 34300 cm/s; halve it because the pulse goes out and back
    distance = (time_elapsed * 34300) / 2
    return distance


while True:
    print(distance())
    time.sleep(0.1)
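If the raw readings jump around, a simple variant is to average a few measurements instead of printing each one (a sketch; average_distance is my own helper name, not part of the original program):

def average_distance(samples=5):
    # take several readings and return the mean to smooth out noise
    readings = []
    for _ in range(samples):
        readings.append(distance())
        time.sleep(0.02)
    return sum(readings) / len(readings)

print(average_distance())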

Check the distance between the captured face and the registered faces

# -*- coding: UTF-8 -*-
import sys,os,dlib,glob,numpy,cv2,time
import RPi.GPIO as GPIO
time.sleep(20)   # give the system time to finish booting when this script is started from rc.local


GPIO.setmode(GPIO.BCM)

GPIO_TRIGGER = 2
GPIO_ECHO = 3
LCD_RS = 7
LCD_E  = 8
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 18

GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setwarnings(False)
GPIO.setup(LCD_E, GPIO.OUT)  # E
GPIO.setup(LCD_RS, GPIO.OUT) # RS
GPIO.setup(LCD_D4, GPIO.OUT) # DB4
GPIO.setup(LCD_D5, GPIO.OUT) # DB5
GPIO.setup(LCD_D6, GPIO.OUT) # DB6
GPIO.setup(LCD_D7, GPIO.OUT) # DB7
GPIO.setup(21,GPIO.OUT)   # pin 21 drives the door-opening mechanism
GPIO.output(21,GPIO.LOW)

# Define some device constants
LCD_WIDTH = 16    # Maximum characters per line
LCD_CHR = True
LCD_CMD = False

LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line

# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005

predictor_path = '/root/Desktop/Test/Dlib_test/shape_predictor_68_face_landmarks.dat'

face_rec_model_path = '/root/Desktop/Test/Dlib_test/dlib_face_recognition_resnet_model_v1.dat'

faces_folder_path = '/root/Desktop/Test/Dlib_test/faces3'


detector = dlib.get_frontal_face_detector()

sp = dlib.shape_predictor(predictor_path)

facerec = dlib.face_recognition_model_v1(face_rec_model_path)

descriptors = []

candidate = []

i = 0
point = 0
col = 0
# count the images already saved in the Collected folder, so new captures get unique numbers
for f in glob.glob(os.path.join('/root/Desktop/Test/Dlib_test/Collected', "*.jpg")):
    col+=1



def line_detection(image):
    # (defined but not called in the main loop) returns 1 if no straight lines are found in the image
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray,80,150,apertureSize=3)
    lines = cv2.HoughLines(edges,2,numpy.pi/180,180)
    if lines is None:
        return 1
    else:
        return 0


def dopen():
    # pulse pin 21 high for two seconds to trigger the door-opening mechanism
    GPIO.output(21,GPIO.HIGH)
    time.sleep(2)
    GPIO.output(21,GPIO.LOW)


def lcd_init():
  # Initialise display
  lcd_byte(0x33,LCD_CMD) # 110011 Initialise
  lcd_byte(0x32,LCD_CMD) # 110010 Initialise
  lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
  lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
  lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
  lcd_byte(0x01,LCD_CMD) # 000001 Clear display
  time.sleep(E_DELAY)

def lcd_byte(bits, mode):
  # Send byte to data pins
  # bits = data
  # mode = True  for character
  #        False for command

  GPIO.output(LCD_RS, mode) # RS

  # High bits
  GPIO.output(LCD_D4, False)
  GPIO.output(LCD_D5, False)
  GPIO.output(LCD_D6, False)
  GPIO.output(LCD_D7, False)
  if bits&0x10==0x10:
    GPIO.output(LCD_D4, True)
  if bits&0x20==0x20:
    GPIO.output(LCD_D5, True)
  if bits&0x40==0x40:
    GPIO.output(LCD_D6, True)
  if bits&0x80==0x80:
    GPIO.output(LCD_D7, True)

  # Toggle 'Enable' pin
  lcd_toggle_enable()

  # Low bits
  GPIO.output(LCD_D4, False)
  GPIO.output(LCD_D5, False)
  GPIO.output(LCD_D6, False)
  GPIO.output(LCD_D7, False)
  if bits&0x01==0x01:
    GPIO.output(LCD_D4, True)
  if bits&0x02==0x02:
    GPIO.output(LCD_D5, True)
  if bits&0x04==0x04:
    GPIO.output(LCD_D6, True)
  if bits&0x08==0x08:
    GPIO.output(LCD_D7, True)

  # Toggle 'Enable' pin
  lcd_toggle_enable()

def lcd_toggle_enable():
  # Toggle enable
  time.sleep(E_DELAY)
  GPIO.output(LCD_E, True)
  time.sleep(E_PULSE)
  GPIO.output(LCD_E, False)
  time.sleep(E_DELAY)

def lcd_string(message,line):
  # Send string to display

  message = message.ljust(LCD_WIDTH," ")

  lcd_byte(line, LCD_CMD)

  for i in range(LCD_WIDTH):
    lcd_byte(ord(message[i]),LCD_CHR)


def distance():

    GPIO.output(GPIO_TRIGGER, True)

    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    start_time = time.time()
    stop_time = time.time()

    while GPIO.input(GPIO_ECHO) == 0:
        start_time = time.time()

    while GPIO.input(GPIO_ECHO) == 1:
        stop_time = time.time()

    time_elapsed = stop_time - start_time

    distance = (time_elapsed * 34300) / 2
    return distance


def ReloadList():
    for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
        print("Processing file: {}".format(f))
        candidate.append(f)
        img = cv2.imread(f)

        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):  

            shape = sp(img, d)

            face_descriptor = facerec.compute_face_descriptor(img, shape)

            v = numpy.array(face_descriptor)
            descriptors.append(v)
        numpy.save("a.npy",descriptors)


def Check(image):
    # compare the captured face against every registered descriptor and open the door on a strong match
    #cv2.imshow("video",image)
    lcd_string("Processing...",LCD_LINE_2)
    global point
    point = 0
    global col
    flag = 0
    img = image
    dets = detector(img,1)
    print("Number of faces detected: {}".format(len(dets)))
    dist = []
    num = 0
    if(len(dets) == 1):
        for k, d in enumerate(dets):
            shape = sp(img, d)
            left = d.left()
            top = d.top()
            right = d.right()
            bottom = d.bottom()
            width = right - left
            height = bottom - top
            print(width)
            print(height)
            face_descriptor = facerec.compute_face_descriptor(img, shape)
            d_test = numpy.array(face_descriptor)
        for i in descriptors:
            dist_ = numpy.linalg.norm(i-d_test)
            dist.append(dist_)
            num+=1

        c_d = dict(zip(candidate,dist))
        cd_sorted = sorted(c_d.items(), key=lambda d:d[1])
        dist = sorted(dist)
        for j in range(num):
            # voting: each registered face closer than 0.37 earns a point, closer than 0.3 a second point
            if dist[j]<0.3:
                point+=1
            if dist[j]<0.37:
                point+=1
            print("\n",dist[j])
        # require a reasonably large (i.e. close) face and a high enough vote count
        if height>170 and width>170 and point>6:
            lcd_string("Access",LCD_LINE_2)
            dopen()
            cv2.imwrite('/root/Desktop/Test/Dlib_test/Collected/Collected'+'Access'+str(col)+'.jpg',img)
            lcd_string("Access",LCD_LINE_2)
            return
        else:
            lcd_string("Open Your Mouth",LCD_LINE_2)
            time.sleep(0.2)
            cv2.imwrite('/root/Desktop/Test/Dlib_test/Collected/Collected'+'Denied'+str(col)+'.jpg',img)
        col+=1
        if col == 1000:
            col=0
        print(point)
    else:
        lcd_string("No Face Detected",LCD_LINE_2)
        return 1





lcd_init()
lcd_string("Initializing...",LCD_LINE_1)
#descriptors = numpy.load("a.npy")
lcd_string("Load",LCD_LINE_1)
cap = cv2.VideoCapture(0)
ReloadList()
while True:
    dis = distance()
    ret,image = cap.read()
    #cv2.imshow("cam",image)
    CH = cv2.waitKey(1)
    if dis<50:
        lcd_string("Too Close!!!",LCD_LINE_1)
    if dis>70:
        lcd_string("Too Far!!!",LCD_LINE_1)
    if dis>50 and dis<70:
        lcd_string("Stand Steady",LCD_LINE_1)
        # read and discard a batch of frames to flush the camera buffer, so the frame we analyse is fresh
        for _ in range(28):
            ret,image = cap.read()

        lcd_string("Capturing...",LCD_LINE_1)
        ret,image = cap.read()
        lcd_string("Captured",LCD_LINE_1)
        Check(image)
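The loop above never releases the camera or resets the GPIO pins. While testing over SSH it can be handy to wrap it so Ctrl-C cleans up; a sketch with the loop body shortened for illustration:

try:
    while True:
        dis = distance()
        print("distance: {:.1f} cm".format(dis))
        time.sleep(0.1)
except KeyboardInterrupt:
    pass
finally:
    cap.release()     # free the USB camera
    GPIO.cleanup()    # reset the GPIO pins so the next run starts clean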








Finally, make your .py run automatically at boot.

sudo vim /etc/rc.local

Add python /home/pi/myscript.py & before exit 0, then save.
All done.
