Category: Facial Beauty

Plot Ratio Histograms

Python Code:

import numpy as np
import matplotlib.pyplot as plt
import linecache

def get_line_content(txt_path, line_num):
    # linecache is 1-indexed, so line_num is a 0-based offset
    return linecache.getline(txt_path, line_num + 1).strip()

# left_ratio list: num = 2; right_ratio list: num = 4
def get_data_list(txt_path, num):
    # each record occupies 5 lines; num is the offset of the ratio value within a record
    ratio = []
    with open(txt_path) as f:
        file_length = len(f.readlines())
    for count in range(0, file_length, 5):
        ratio.append(float(get_line_content(txt_path, count + num)))
    return ratio

def normfun(x, mu, sigma):
    # Gaussian probability density function
    pdf = np.exp(-((x - mu)**2) / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))
    return pdf

def draw_hist(left_data, right_data):
    left_bins = np.linspace(min(left_data), max(left_data), 80)
    plt.hist(left_data, left_bins, density=True, facecolor='blue', alpha=0.5)
    # fitted normal distribution
    left_mu = np.mean(left_data)
    left_sigma = np.std(left_data)
    left_x = np.arange(min(left_data), max(left_data), 0.001)
    left_y = normfun(left_x, left_mu, left_sigma)
    plt.plot(left_x, left_y)
    plt.title(r'Eyebrow-Eye Histogram : $\mu=1.9790$,$\sigma=0.1888$')

    right_bins = np.linspace(min(right_data), max(right_data), 80)
    plt.hist(right_data, right_bins, density=True, facecolor='red', alpha=0.5)
    # fitted normal distribution
    right_mu = np.mean(right_data)
    right_sigma = np.std(right_data)
    right_x = np.arange(min(right_data), max(right_data), 0.001)
    right_y = normfun(right_x, right_mu, right_sigma)
    plt.plot(right_x, right_y)

    print("left:", left_mu, left_sigma)
    print("right:", right_mu, right_sigma)
    plt.show()

def main():
    txt_path = "D:/Lab/Jiaying_Li/keypoints/eye-eyebrow ratio/euro_male_eye-eyebrow ratio.txt"
    left_ratio = get_data_list(txt_path, 2)
    right_ratio = get_data_list(txt_path, 4)
    draw_hist(left_ratio, right_ratio)

if __name__ == "__main__":
    main()
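
For reference, this script assumes the record layout produced by the ratio-extraction script further below: each face occupies five lines, with the left and right ratio values at offsets 2 and 4. A sample record (values illustrative):

#1
left ratio
1.9523809523809523
right ratio
2.0147058823529411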

Save 68 Landmarks to TXT

Python Code:

import cv2
import dlib
import os

def get_points(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # note: creating the detector/predictor per image works but is slow;
    # they could be built once at module level
    detector = dlib.get_frontal_face_detector()
    detector_path = "D:/Anaconda3/Anaconda/Lib/site-packages/dlib/shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(detector_path)
    dets = detector(gray, 1)  # upsample once to find smaller faces
    pts = []
    for face in dets:
        shape = predictor(img, face)  # 68 landmarks on a human face
        for pt in shape.parts():
            pt_pos = (pt.x, pt.y)
            pts.append(pt_pos)
    return pts

def write_txt(points_list, txt_path, count):
    with open(txt_path + "/" + "keypoints.txt", "a") as f:
        f.write("#" + str(count) + "\n")
        for points_tuple in points_list:
            f.write(str(points_tuple[0])+"," + str(points_tuple[1]) + "\n")

def main():
    path = "C:/Users/lenovo/Desktop/euromale"
    os.chdir(path)
    files = os.listdir(".")
    # images are assumed to be named 1.jpg, 2.jpg, ..., N.jpg
    for count in range(1, len(files) + 1):
        img_path = path + "/" + str(count) + ".jpg"
        write_txt(get_points(img_path), path, count)

if __name__ == "__main__":
    main()
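
Each face is appended to keypoints.txt as a "#N" header followed by 68 comma-separated "x,y" lines, one per landmark (coordinates illustrative):

#1
345,412
347,450
...            (68 point lines in total)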

Save Eyebrows / Eyes Landmarks to TXT

Python Code:

import cv2
import dlib
import os

def get_points(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = dlib.get_frontal_face_detector()
    detector_path = "D:/Anaconda3/Anaconda/Lib/site-packages/dlib/shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(detector_path)
    dets = detector(gray, 1)
    pts = []
    for face in dets:
        shape = predictor(img, face)  # 68 landmarks on a human face
        for pt in shape.parts():
            pt_pos = (pt.x, pt.y)
            pts.append(pt_pos)
    return pts

def write_txt(points_list, txt_path, count):
    with open(txt_path + "/" + "keypoints_eyes-eyebrow.txt", "a") as f:
        f.write("#" + str(count) + "\n")
        f.write("left eyebrow (17-21): " + "\n")
        for points_tuple in points_list[17:22]:
            f.write(str(points_tuple[0])+"," + str(points_tuple[1]) + "\n")
        f.write("right eyebrow (22-26): " + "\n")
        for points_tuple in points_list[22:27]:
            f.write(str(points_tuple[0])+"," + str(points_tuple[1]) + "\n")
        f.write("left eye (36-41): " + "\n")
        for points_tuple in points_list[36:42]:
            f.write(str(points_tuple[0])+"," + str(points_tuple[1]) + "\n")
        f.write("right eye (42-47): " + "\n")
        for points_tuple in points_list[42:48]:
            f.write(str(points_tuple[0])+"," + str(points_tuple[1]) + "\n")  

def main():
    path = "C:/Users/lenovo/Desktop/euromale"
    os.chdir(path)
    files = os.listdir(".")
    # images are assumed to be named 1.jpg, 2.jpg, ..., N.jpg
    for count in range(1, len(files) + 1):
        img_path = path + "/" + str(count) + ".jpg"
        write_txt(get_points(img_path), path, count)

if __name__ == "__main__":
    main()
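
Each record written by this script spans 27 lines: a "#N" header, four label lines, and 5 + 5 + 6 + 6 point lines. The ratio script below relies on this 27-line stride (coordinates illustrative):

#1
left eyebrow (17-21):
312,280
...                     (5 point lines)
right eyebrow (22-26):
...                     (5 point lines)
left eye (36-41):
...                     (6 point lines)
right eye (42-47):
...                     (6 point lines)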

Save Eyebrow-Eye Ratio to TXT

Python Code:

import linecache

def get_line_content(txt_path, line_num):
    # linecache is 1-indexed, so line_num is a 0-based offset
    return linecache.getline(txt_path, line_num + 1).strip()

def get_eye_length(count, txt_path, position):
    # the two eye corner points are 3 lines apart within a record
    point_left = get_line_content(txt_path, 27*(count-1) + position)
    point_right = get_line_content(txt_path, 27*(count-1) + position + 3)
    point_left_x, point_left_y = int(point_left.split(",")[0]), int(point_left.split(",")[1])
    point_right_x, point_right_y = int(point_right.split(",")[0]), int(point_right.split(",")[1])
    return ((point_left_x - point_right_x)**2 + (point_left_y - point_right_y)**2)**0.5

def get_eyebrow_length(count, txt_path, position):
    # the two eyebrow end points are 4 lines apart within a record
    point_left = get_line_content(txt_path, 27*(count-1) + position)
    point_right = get_line_content(txt_path, 27*(count-1) + position + 4)
    point_left_x, point_left_y = int(point_left.split(",")[0]), int(point_left.split(",")[1])
    point_right_x, point_right_y = int(point_right.split(",")[0]), int(point_right.split(",")[1])
    return ((point_left_x - point_right_x)**2 + (point_left_y - point_right_y)**2)**0.5

# left eye position: 14; right eye position: 21; 
# left eyebrow position: 2; right eyebrow position: 8 

def write_txt(count, txt_path):
    with open("C:/Users/lenovo/Desktop/" + "eye-eyebrow ratio.txt", "a") as f:
        f.write(get_line_content(txt_path, 27*(count-1)) + "\n")
        f.write("left ratio\n")
        f.write(str(get_eyebrow_length(count, txt_path, 2)/get_eye_length(count, txt_path, 14)) + "\n")
        f.write("right ratio\n")
        f.write(str(get_eyebrow_length(count, txt_path, 8)/get_eye_length(count, txt_path, 21)) + "\n")

def main():
    txt_path = "C:/Users/lenovo/Desktop/keypoints_eyes-eyebrow.txt"
    with open(txt_path) as f:
        file_length = len(f.readlines())
    # each face occupies 27 lines in the keypoints file
    for count in range(1, file_length // 27 + 1):
        write_txt(count, txt_path)

if __name__ == "__main__":
    main()
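
The positions 2, 8, 14, and 21 follow from the 27-line record layout above: offset 0 is the "#N" header and offsets 1, 7, 13, and 20 are label lines, so the first point of each group lands at

offset 2    first left-eyebrow point  (landmark 17)
offset 8    first right-eyebrow point (landmark 22)
offset 14   first left-eye point      (landmark 36)
offset 21   first right-eye point     (landmark 42)

The +4 and +3 steps then reach the other end of the eyebrow (landmark 21 or 26) and the opposite eye corner (landmark 39 or 45), so each ratio is eyebrow end-to-end length divided by eye corner-to-corner width.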

Face Alignment

Python Code:

from mtcnn import MTCNN
import cv2
import numpy as np
import imutils

img_path = "Beautiful faces\\Asia\\Male\\1.jpg"
img = cv2.imread(img_path)
detector = MTCNN()
face_list = detector.detect_faces(img) 

for face in face_list:
    box = face["box"]
    keypoints = face["keypoints"]
    left = keypoints["left_eye"]
    right = keypoints["right_eye"]
    p1 = np.array(left)
    p2 = np.array(right)
    dp = p1 - p2
    angle = np.degrees(np.arctan(dp[1] / dp[0]))  # eye-line tilt in degrees
    rot_img = imutils.rotate(img, angle)  # rotate so the eyes are level
    x, y, w, h = box
    x, y = max(x, 0), max(y, 0)  # MTCNN can return slightly negative box coordinates
    cropped = rot_img[y:y+h, x:x+w]

cv2.imwrite("C:\\Users\\lenovo\\Desktop\\1.jpg", cropped)
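
A hedged batch version of the snippet above, assuming images named 1.jpg ... N.jpg as in the earlier scripts (folder paths and function name are illustrative); it also guards against images where MTCNN finds no face, in which case the single-image version would crash on the unset cropped variable:

import os

def align_faces(in_dir, out_dir):
    detector = MTCNN()
    os.makedirs(out_dir, exist_ok=True)
    for name in os.listdir(in_dir):
        if not name.endswith(".jpg"):
            continue
        img = cv2.imread(os.path.join(in_dir, name))
        faces = detector.detect_faces(img)
        if not faces:
            print("no face found in", name)  # skip instead of crashing on an unset variable
            continue
        box = faces[0]["box"]
        kp = faces[0]["keypoints"]
        dp = np.array(kp["left_eye"]) - np.array(kp["right_eye"])
        angle = np.degrees(np.arctan(dp[1] / dp[0]))  # eye-line tilt
        rot = imutils.rotate(img, angle)
        x, y, w, h = box
        x, y = max(x, 0), max(y, 0)
        cv2.imwrite(os.path.join(out_dir, name), rot[y:y+h, x:x+w])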

Average Face Model

Python Code:

import os
import cv2
import numpy as np
import math

def readPoints(path):
    # Create an array of arrays of points; one .txt of "x y" lines per image
    pointsArray = []
    for filePath in os.listdir(path):
        if filePath.endswith(".txt"):
            points = []  # Create an array of points
            with open(os.path.join(path, filePath)) as file:
                for line in file:
                    x, y = line.split()
                    points.append((int(x), int(y)))  # Store the points
            pointsArray.append(points)
    return pointsArray
 
# Read all jpg images in folder
def readImages(path):  
    imagesArray = [] #Create array of array of images
    for filePath in os.listdir(path):
        if filePath.endswith(".jpg"):
            img = cv2.imread(os.path.join(path,filePath))
            img = np.float32(img)/255.0 # Convert to floating point
            imagesArray.append(img) # Add to array of images
    return imagesArray
 
def similarityTransform(inPoints, outPoints):
    s60 = math.sin(60*math.pi/180)
    c60 = math.cos(60*math.pi/180)
    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()
    # Add a third point so the similarity transform is fully determined:
    # each pair of eye corners is completed to an equilateral triangle
    xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0]
    yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1]
    inPts.append([int(xin), int(yin)])
    xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1]
    outPts.append([int(xout), int(yout)])
    # cv2.estimateRigidTransform was removed in OpenCV 4; estimateAffinePartial2D is its replacement
    tform, _ = cv2.estimateAffinePartial2D(np.array(inPts, np.float32), np.array(outPts, np.float32))
    return tform

def rectContains(rect, point): # Check if a point is inside a rectangle
    if point[0] < rect[0]:
        return False
    elif point[1] < rect[1]:
        return False
    elif point[0] > rect[2]:
        return False
    elif point[1] > rect[3]:
        return False
    return True
 
def calculateDelaunayTriangles(rect, points): # Calculate Delaunay triangles
    subdiv = cv2.Subdiv2D(rect) # Create subdiv
    for p in points: # Insert points into subdiv
        subdiv.insert((float(p[0]), float(p[1])))
    triangleList = subdiv.getTriangleList()
    # List of triangles; each triangle is a list of 3 points (6 numbers)
    delaunayTri = []
    for t in triangleList: # Find the indices of triangles in the points array
        pt = [(t[0], t[1]), (t[2], t[3]), (t[4], t[5])]
        pt1, pt2, pt3 = pt
        if rectContains(rect, pt1) and rectContains(rect, pt2) and rectContains(rect, pt3):
            ind = []
            for j in range(0, 3):
                for k in range(0, len(points)):
                    if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:
                        ind.append(k)
            if len(ind) == 3:
                delaunayTri.append((ind[0], ind[1], ind[2]))
    return delaunayTri
 
def constrainPoint(p, w, h):
    # Clamp a point to lie inside a w x h image
    return (min(max(p[0], 0), w - 1), min(max(p[1], 0), h - 1))

# Apply the affine transform calculated from srcTri and dstTri to src, producing an output image of the given size
def applyAffineTransform(src, srcTri, dstTri, size):
    # Given a pair of triangles, find the affine transform
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return dst

# Warps and alpha blends triangular regions from img1 and img2 to img
def warpTriangle(img1, img2, t1, t2):
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    # Offset points by left top corner of the respective rectangles
    t1Rect = [] 
    t2Rect = []
    t2RectInt = []
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
    mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32) # Get mask by filling triangle
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)
    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask
    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect

if __name__ == '__main__':
    path = 'presidents/'
    # Dimensions of output image
    w = 600
    h = 600
    # Read points for all images
    allPoints = readPoints(path)
    # Read all images
    images = readImages(path)
    # Eye corners in the output image
    eyecornerDst = [(int(0.3 * w), int(h / 3)), (int(0.7 * w), int(h / 3))]
    imagesNorm = []
    pointsNorm = []
    # Add boundary points for Delaunay triangulation
    boundaryPts = np.array([(0,0), (w/2,0), (w-1,0), (w-1,h/2), (w-1,h-1), (w/2,h-1), (0,h-1), (0,h/2)])
    # Initialize location of average points to 0s
    pointsAvg = np.array([(0,0)] * (len(allPoints[0]) + len(boundaryPts)), np.float32)
    n = len(allPoints[0])
    numImages = len(images)
    # Warp images and transform landmarks to the output coordinate system, then average the transformed landmarks
    for i in range(0, numImages):
        points1 = allPoints[i]
        # Corners of the eyes in the input image
        eyecornerSrc = [allPoints[i][36], allPoints[i][45]]
        # Compute similarity transform
        tform = similarityTransform(eyecornerSrc, eyecornerDst)
        # Apply similarity transform to the image
        img = cv2.warpAffine(images[i], tform, (w, h))
        # Apply similarity transform to the points
        points2 = np.reshape(np.float32(points1), (68, 1, 2))
        points = cv2.transform(points2, tform)
        points = np.float32(np.reshape(points, (68, 2)))
        # Append boundary points; used in the Delaunay triangulation
        points = np.append(points, boundaryPts, axis=0)
        # Accumulate the average landmark locations
        pointsAvg = pointsAvg + points / numImages
        pointsNorm.append(points)
        imagesNorm.append(img)
    # Delaunay triangulation
    rect = (0, 0, w, h)
    dt = calculateDelaunayTriangles(rect, np.array(pointsAvg))
    # Output image
    output = np.zeros((h, w, 3), np.float32)
    # Warp input images to the average image landmarks
    for i in range(0, len(imagesNorm)):
        img = np.zeros((h, w, 3), np.float32)
        # Transform triangles one by one
        for j in range(0, len(dt)):
            tin = []
            tout = []
            for k in range(0, 3):
                pIn = pointsNorm[i][dt[j][k]]
                pIn = constrainPoint(pIn, w, h)
                pOut = pointsAvg[dt[j][k]]
                pOut = constrainPoint(pOut, w, h)
                tin.append(pIn)
                tout.append(pOut)
            warpTriangle(imagesNorm[i], img, tin, tout)
        output = output + img # Add image intensities for averaging
    output = output / numImages # Divide by numImages to get the average
    cv2.imshow('image', output) # Display result
    cv2.waitKey(0)
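
Note that readPoints expects one landmark file per image with space-separated "x y" lines, while the dlib scripts above write comma-separated "x,y" lines under "#N" headers into a single file. A hedged conversion sketch (function name and file naming are illustrative; readPoints pairs files with images by directory order):

import os

def split_keypoints(txt_path, out_dir):
    # Split the combined keypoints.txt written above into one "x y" file per face
    os.makedirs(out_dir, exist_ok=True)
    out = None
    with open(txt_path) as f:
        for line in f:
            line = line.strip()
            if line.startswith("#"):  # a "#N" header starts a new face
                if out:
                    out.close()
                out = open(os.path.join(out_dir, line[1:] + ".txt"), "w")
            elif line and out:
                x, y = line.split(",")
                out.write(x + " " + y + "\n")
    if out:
        out.close()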

Reformat the Images

Python Code:

import os
os.chdir("beautiful_faces\\caucasian\\male")
# list all files
files = os.listdir(".")
for filename in files:
    portion = os.path.splitext(filename)
    if portion[1] != ".jpg":
        # note: this only renames the extension; it does not re-encode the image data
        newname = "x" + portion[0] + ".jpg"
        os.rename(filename, newname)
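
Renaming only changes the extension; for a true re-encode to JPEG, decoding and re-saving with OpenCV is one option. A minimal sketch under that assumption (same folder as above):

import os
import cv2

os.chdir("beautiful_faces\\caucasian\\male")
for filename in os.listdir("."):
    name, ext = os.path.splitext(filename)
    if ext.lower() not in (".jpg", ".jpeg"):
        img = cv2.imread(filename)  # decode whatever format the file actually is
        if img is not None:
            cv2.imwrite(name + ".jpg", img)  # re-encode as JPEG
            os.remove(filename)  # drop the original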

Rename the Images

Python Code:

import os
os.chdir("beautiful_faces\\caucasian\\male")
files = os.listdir(".")
n = 0
for filename in files:
    portion = os.path.splitext(filename)
    newname = str(n + 1) + ".jpg"
    os.rename(filename, newname)  # raises on Windows if newname already exists
    n += 1
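
If some files already carry target names like 1.jpg, the rename above can collide with an existing file. A two-pass sketch avoids that (the tmp_ prefix is illustrative):

import os

os.chdir("beautiful_faces\\caucasian\\male")
# pass 1: move everything to temporary names that cannot collide
for i, filename in enumerate(os.listdir(".")):
    os.rename(filename, "tmp_" + str(i) + ".jpg")
# pass 2: assign the final sequential names
for i, filename in enumerate(os.listdir(".")):
    os.rename(filename, str(i + 1) + ".jpg")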

Web Crawler for Images

Python Code:

# coding:utf-8
import os
import re
import urllib
import json
import socket
import urllib.request
import urllib.parse
import urllib.error
import time

timeout = 5
socket.setdefaulttimeout(timeout)

class Crawler:
    __time_sleep = 0.1      # sleeping time
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    # fetch the URL information of the pictures
    def __init__(self, t=0.1):
        self.time_sleep = t     # download interval
    # get the file extension of a picture
    def get_suffix(self, name):
        m = re.search(r'\.[^\.]*$', name)
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpg'
    # derive the referrer from the image URL
    def get_referrer(self, url):
        par = urllib.parse.urlparse(url)
        if par.scheme:
            return par.scheme + '://' + par.netloc
        else:
            return par.netloc
    def save_image(self, rsp_data, word):        # save images
        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        self.__counter = len(os.listdir('./' + word)) + 1
        for image_info in rsp_data['imgs']:
            try:
                time.sleep(self.time_sleep)
                suffix = self.get_suffix(image_info['objURL'])            
                refer = self.get_referrer(image_info['objURL'])  # reduce 403 error
                opener = urllib.request.build_opener()
                opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'),('Referer', refer)]
                urllib.request.install_opener(opener)
                urllib.request.urlretrieve(image_info['objURL'], './' + word + '/' + str(self.__counter) + str(suffix))
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("unknown error, cancel operations")
                continue
            else:
                print("figure+1, there is already" + str(self.__counter) + "figures")
                self.__counter += 1
        return
    def get_images(self, word='beautiful girls'):
        search = urllib.parse.quote(word)
        pn = self.__start_amount    
        while pn < self.__amount:
            url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
                pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            try:
                time.sleep(self.time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read().decode('unicode_escape')
                page.close()
            except UnicodeDecodeError as e:
                print(e)
                print('----- UnicodeDecodeError, url:', url)
                pn += 60  # skip this page rather than retrying it forever
            except urllib.error.URLError as e:
                print(e)
                print("----- URLError, url:", url)
                pn += 60  # skip this page rather than retrying it forever
            except socket.timeout as e:
                print(e)
                print("----- socket timeout:", url)
                pn += 60  # skip this page rather than retrying it forever
            else:
                rsp_data = json.loads(rsp)
                self.save_image(rsp_data, word)
                print("downloading the next page")
                pn += 60
        print("Finished")
        return
    def start(self, word, spider_page_num=1, start_page=1):
        """
        crawler entry point
        :param word: key words
        :param spider_page_num: number of result pages to fetch; the total picture count is spider_page_num x 60
        :param start_page: start page
        :return:
        """
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.get_images(word)

if __name__ == '__main__':
    crawler = Crawler(0.05)  # download interval: 0.05 s
    crawler.start('beautiful girls', 10, 2)  # key word "beautiful girls", 10 pages (10x60=600 pictures), starting from page 2
    # crawler.start("actors", 5, 3)  # key word "actors", 5 pages (5x60=300 pictures), starting from page 3