# Finally, if you really want to you can ask the detector to tell you the score
# for each detection. The score is bigger for more confident detections.
# The third argument to run is an optional adjustment to the detection threshold,
# where a negative value will return more detections and a positive value fewer.
# Also, the idx tells you which of the face sub-detectors matched. This can be
# used to broadly identify faces in different orientations.
if (len(sys.argv[1:]) > 0):
    img = io.imread(sys.argv[1])
    dets, scores, idx = detector.run(img, 1, -1)
    for i, d in enumerate(dets):
        print("Detection {}, score: {}, face_type:{}".format(
            d, scores[i], idx[i]))
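To see the effect of the threshold adjustment and the sub-detector index in isolation, here is a minimal sketch (not from the official example; it assumes an image file named test.jpg in the working directory):

import dlib
from skimage import io

detector = dlib.get_frontal_face_detector()
img = io.imread("test.jpg")

# Third argument 0.0 keeps the default decision threshold.
dets, scores, idx = detector.run(img, 1, 0.0)
# A negative adjustment (-0.5) lowers the threshold, so weaker
# candidate detections are returned as well.
loose_dets, loose_scores, loose_idx = detector.run(img, 1, -0.5)

print("default threshold:", len(dets), "detections")
print("lowered threshold:", len(loose_dets), "detections")
for d, score, sub in zip(dets, scores, idx):
    # 'sub' identifies which HOG sub-detector fired, which roughly
    # corresponds to the orientation of the detected face.
    print(d, "score:", score, "sub-detector:", sub)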
To make it easier to follow, here is face_detector.py with comments added:
import dlib
from skimage import io

# Get dlib's frontal face detector
detector = dlib.get_frontal_face_detector()

# path is the directory that contains the image
path = "F:/code/python/P_dlib_face/pic/"
img = io.imread(path + "1.jpg")

# Run the detector on the image
dets = detector(img)
print("Number of faces:", len(dets))

# Print the four coordinates of each face rectangle
for i, d in enumerate(dets):
    print("Face", i, "coordinates:",
          "left:", d.left(),
          "right:", d.right(),
          "top:", d.top(),
          "bottom:", d.bottom())

# Create a window to display the image
win = dlib.image_window()
# Clear any existing overlay
#win.clear_overlay()
win.set_image(img)
# Overlay the detected rectangles on the image
win.add_overlay(dets)
# Keep the window open until Enter is pressed
dlib.hit_enter_to_continue()
Face detection on test.jpg:
Result:
Image window result:
Console output:
Number of faces: 1
Face 0: left: 79 right: 154 top: 47 bottom: 121
Hit enter to continue
Detection result on an image with multiple faces:
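When several faces are found, the rectangle coordinates printed above can be used directly to crop each face out of the image array. The following is a minimal sketch (not part of the original post; faces.jpg is a hypothetical multi-face image):

import dlib
from skimage import io

detector = dlib.get_frontal_face_detector()
img = io.imread("faces.jpg")
dets = detector(img, 1)          # upsample once to catch smaller faces
print("Number of faces:", len(dets))

for i, d in enumerate(dets):
    # Clamp the rectangle to the image bounds before slicing,
    # since a detection can extend slightly outside the image.
    top, bottom = max(d.top(), 0), min(d.bottom(), img.shape[0])
    left, right = max(d.left(), 0), min(d.right(), img.shape[1])
    io.imsave("face_{}.png".format(i), img[top:bottom, left:right])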
2.2 face_landmark_detection.py
The official face_landmark_detection.py:
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example program shows how to find frontal human faces in an image and
# estimate their pose. The pose takes the form of 68 landmarks. These are
# points on the face such as the corners of the mouth, along the eyebrows, on
# the eyes, and so forth.
#
# The face detector we use is made using the classic Histogram of Oriented
# Gradients (HOG) feature combined with a linear classifier, an image pyramid,
# and sliding window detection scheme. The pose estimator was created by
# using dlib's implementation of the paper:
# One Millisecond Face Alignment with an Ensemble of Regression Trees by
# Vahid Kazemi and Josephine Sullivan, CVPR 2014
# and was trained on the iBUG 300-W face landmark dataset (see
# https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/):
#    C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic.
# 300 faces In-the-wild challenge: Database and results.
# Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation "In-The-Wild". 2016.
# You can get the trained model file from:
#     http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
# Note that the license for the iBUG 300-W dataset excludes commercial use.
# So you should contact Imperial College London to find out if it's OK for
# you to use this model file in a commercial product.
#
#
# Also, note that you can train your own models using dlib's machine learning
# tools. See train_shape_predictor.py to see an example.
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
# or
# python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster.
#
# Compiling dlib should work on any operating system so long as you have
# CMake and boost-python installed. On Ubuntu, this can be done easily by
# running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install scikit-image
# Or downloaded from http://scikit-image.org/download.html
import sys
import os
import dlib
import glob
from skimage import io