We will set up face_recognition on an Ubuntu server.
face_recognition is built using dlib's state-of-the-art face recognition model with deep learning. The model has an accuracy of 99.38% on the Labeled Faces in the Wild benchmark.
https://github.com/ageitgey/face_recognition
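For context, the library's core API is small: load an image, compute a 128-dimensional encoding for each detected face, and compare encodings. A minimal sketch (the file names person.jpg and unknown.jpg are just placeholders):
# Minimal usage sketch of the face_recognition API
import face_recognition

# Load a known photo and an unknown photo (placeholder file names)
known_image = face_recognition.load_image_file("person.jpg")
unknown_image = face_recognition.load_image_file("unknown.jpg")

# One 128-d encoding per detected face; [0] takes the first face in each image
known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# compare_faces returns a list of booleans, one per known encoding
results = face_recognition.compare_faces([known_encoding], unknown_encoding, tolerance=0.6)
print(results)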
Installation
Ubuntu 18.04.2
Install ssh
sudo apt install openssh-server -y
Development
sudo apt install git python python-pip python-dev python-virtualenv python3 python3-pip python3-dev build-essential cmake pkg-config libx11-dev libatlas-base-dev libgtk-3-dev libboost-python-dev virtualenv -y
mkdir -p ~/workspace/python
nano ~/.bash_aliases
Configure the environment path by adding the snippet below to ~/.bash_aliases:
#Python env
export ML3_ROOT="$HOME/workspace/python/ml3"
alias ml3="$ML3_ROOT/bin/python3"
alias ml3.setup="virtualenv --system-site-packages -p python3 $ML3_ROOT && ml3.active"
alias ml3.destroy="rm -rf $ML3_ROOT"
alias ml3.active="source $ML3_ROOT/bin/activate"
Activate ~/.bash_aliases and set up the Python environment:
source ~/.bash_aliases
ml3.setup
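To confirm the virtualenv was created and activated, here is a quick optional check run with the ml3 interpreter (e.g. paste it into an interactive ml3 session):
# Optional sanity check for the ml3 environment
import sys
print(sys.executable)  # expected to point inside ~/workspace/python/ml3/bin/
print(sys.prefix)      # expected to be the ML3_ROOT directory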
Python init
# Activate the python environment
ml3.active
# Install python libs
pip install flask numpy scipy matplotlib pandas statsmodels scikit-learn IPython seaborn nltk plotly cufflinks lightgbm yellowbrick scikit-image dlib opencv-python
pip install --upgrade tensorflow
pip install face_recognition
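A quick smoke test that the compiled libraries were built and import cleanly (run with the ml3 interpreter):
# Smoke test: these imports pull in the compiled dlib and OpenCV bindings
import dlib
import cv2
import face_recognition
print("dlib", dlib.__version__)
print("opencv", cv2.__version__)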
Run face_recognition example
cd ~/workspace/
git clone https://github.com/ageitgey/face_recognition.git
cd face_recognition
Test with the face_recognition CLI
# on macOS
~/.local/bin/face_recognition --tolerance 0.54 --show-distance true ./examples/knn_examples/train/alex_lacamoire/ ./examples/knn_examples/test/ | cut -d ',' -f2
# on Ubuntu
face_recognition --tolerance 0.54 --show-distance true ./examples/knn_examples/train/alex_lacamoire/ ./examples/knn_examples/test/ | cut -d ',' -f2
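A rough Python equivalent of that CLI call, using face_distance to print the raw distance instead of --show-distance (paths assume the cloned repository layout; the 0.54 tolerance matches the command above):
# Compare every test photo against one known photo of alex_lacamoire
import os
import face_recognition

train_dir = "./examples/knn_examples/train/alex_lacamoire/"
test_dir = "./examples/knn_examples/test/"
tolerance = 0.54

# Use the first training photo as the known face
known_file = sorted(os.listdir(train_dir))[0]
known_image = face_recognition.load_image_file(os.path.join(train_dir, known_file))
known_encoding = face_recognition.face_encodings(known_image)[0]

for name in sorted(os.listdir(test_dir)):
    image = face_recognition.load_image_file(os.path.join(test_dir, name))
    for encoding in face_recognition.face_encodings(image):
        distance = face_recognition.face_distance([known_encoding], encoding)[0]
        print(name, round(distance, 3), "match" if distance <= tolerance else "no_match")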
Now create your own server that uses face_recognition.
Example index.py
# This is a _very simple_ example of a web service that recognizes faces in uploaded images.
# - Upload an image file with a name to register a face
# - Upload an image file and it will check whether the image contains a registered face.
# The result is returned as json. For example:
#
# $ curl -XPOST -F "[email protected]" http://127.0.0.1:5001
#
# Returns:
#
# {
# "face_found": false,
# "person": null
# }
#
# This example is based on the Flask file upload example: http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
# NOTE: This example requires flask to be installed! You can install it with pip:
# $ pip3 install flask
import json
import face_recognition
from flask import Flask, jsonify, request, redirect, flash
class Person:
    def __init__(self, name, id, encodings):
        self.name = name
        self.id = id
        self.encodings = encodings

# Allowed upload file extensions
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

app = Flask(__name__)
# flash() needs a session secret key
app.secret_key = 'change-this-secret-key'
# Max content file size 1MB
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024

# Store all known faces with Person info
person_list = []

def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET'])
def nhancv_face_id():
    return '''
    <!doctype html>
    <title>nhancv.com Face ID</title>
    <h1>For register</h1>
    <form action="/register" target="_blank">
      <button type="submit">Click me</button>
    </form>
    <br>
    <hr>
    <h1>For verify</h1>
    <form action="/verify" target="_blank">
      <button type="submit">Click me</button>
    </form>
    '''
@app.route('/verify', methods=['GET', 'POST'])
def verify_person():
    # Check if a valid image file was uploaded
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No file selected for uploading')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # The image file seems valid! Detect faces and return the result.
            return detect_faces_in_image(file)
    # If no valid image file was uploaded, show the file upload form:
    return '''
    <!doctype html>
    <title>nhancv.com Face ID</title>
    <h1>Upload a picture to verify</h1>
    <form method="POST" action="/verify" enctype="multipart/form-data">
      Photo: <input type="file" name="file"><br>
      <input type="submit" value="Verify">
    </form>
    '''
@app.route('/register', methods=['GET', 'POST'])
def register_person():
    # Check if a valid image file was uploaded
    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)
        name = request.form.get('name')
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # The image file seems valid! Create the face sample and return the result.
            return sample_creating_in_image(name, file)
    # If no valid image file was uploaded, show the file upload form:
    return '''
    <!doctype html>
    <title>Register person</title>
    <h1>Upload a picture to register</h1>
    <form method="POST" action="/register" enctype="multipart/form-data">
      Name: <input type="text" name="name"><br>
      Photo: <input type="file" name="file"><br>
      <input type="submit" value="Register">
    </form>
    '''
def sample_creating_in_image(name, file_stream):
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    msg = "No face found"
    face_found = False
    if len(unknown_face_encodings) > 0:
        face_found = True
        msg = "Successful"
        # Get input data
        id = len(person_list)
        if not name:
            name = f"Person {id}"
        # Add to array
        person_list.append(Person(name, id, unknown_face_encodings[0]))
        print('Sample person %s:%s created' % (name, id))
    else:
        print(msg)

    # Return the result as json
    result = {
        "error": not face_found,
        "message": msg
    }
    return jsonify(result)
def detect_faces_in_image(file_stream):
    # Extract the known encodings array
    face_encoding_array = list(map(lambda x: x.encodings, person_list))
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    person = None
    message = f"Total {len(face_encoding_array)}"
    if len(unknown_face_encodings) > 0:
        # See if the first face in the uploaded image matches a known face.
        # face_recognition.api.compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6)
        # tolerance: how much distance between faces to consider a match. Lower is stricter; 0.6 is the typical best performance.
        match_results = face_recognition.compare_faces(face_encoding_array, unknown_face_encodings[0], tolerance=0.3)
        for i in range(len(match_results)):
            if match_results[i]:
                person = {
                    "name": person_list[i].name,
                    "id": person_list[i].id
                }
                face_found = True
                message = message + f" - Found: {person_list[i].name}:{person_list[i].id}"
                break
    print('Status %s. %s' % (face_found, message))

    # Return the result as json
    result = {
        "face_found": face_found,
        "person": person
    }
    return jsonify(result)
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8888, debug=True)
Start server
python index.py
# Access it at http://localhost:8888
Use on client


For mobile, you can use dlib or Firebase Vision to detect the face rectangle and crop it, then send the crop to the server to register and verify later.
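For a quick test from another machine or script, here is a minimal client sketch using the requests library (pip install requests); it assumes the server above is reachable at localhost:8888, and the photo file names are placeholders:
# Minimal HTTP client for the /register and /verify endpoints above
import requests

BASE_URL = "http://localhost:8888"

# Register a face: multipart upload with a "name" field and a "file" field
with open("alice.jpg", "rb") as f:
    r = requests.post(BASE_URL + "/register", data={"name": "Alice"}, files={"file": f})
print(r.json())  # expected shape: {"error": ..., "message": ...}

# Verify another photo against the registered faces
with open("unknown.jpg", "rb") as f:
    r = requests.post(BASE_URL + "/verify", files={"file": f})
print(r.json())  # expected shape: {"face_found": ..., "person": ...}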