# mobile_face_id_demo.py
import os
import numpy as np
import MNN
import cv2
import logging
from skimage import transform as trans

from face_detecter import Face_Detector
from face_id import Face_Recognizer


def preprocess(image, landmarks):
    """Align a face to the canonical 112x112 ArcFace template.

    Estimates a similarity transform mapping the detected 5-point
    landmarks onto the reference template, then warps the image with it.

    Args:
        image: source image (BGR, as loaded by cv2.imread).
        landmarks: five (x, y) points — eyes, nose tip, mouth corners.

    Returns:
        The 112x112 aligned face crop.
    """
    # Canonical 5-point template (left eye, right eye, nose tip,
    # left/right mouth corner) for a 112x112 ArcFace-style input.
    reference = np.array(
        [[38.2946, 51.6963],
         [73.5318, 51.5014],
         [56.0252, 71.7366],
         [41.5493, 92.3655],
         [70.7299, 92.2041]],
        dtype=np.float32,
    )

    points = np.asarray(landmarks).astype(np.float32)
    similarity = trans.SimilarityTransform()
    similarity.estimate(points, reference)
    # Top two rows of the 3x3 homogeneous matrix form the affine warp.
    affine = similarity.params[0:2, :]
    return cv2.warpAffine(image, affine, (112, 112), borderValue=0.0)

def get_face(image, bbox):
    """Crop a face box from *image* and return a 112x112 float32 RGB array.

    Args:
        image: BGR image as returned by cv2.imread.
        bbox: (x1, y1, x2, y2) corner coordinates from the detector.

    Returns:
        112x112x3 float32 RGB crop, resized from the clamped box.
    """
    h, w = image.shape[:2]
    # Clamp the box to the image bounds: detector overshoot (negative or
    # out-of-range coordinates) would otherwise produce an empty slice,
    # on which cv2.cvtColor raises. Cast to int in case of float coords.
    x1 = max(0, int(bbox[0]))
    y1 = max(0, int(bbox[1]))
    x2 = min(w, int(bbox[2]))
    y2 = min(h, int(bbox[3]))
    face_area = image[y1:y2, x1:x2]
    norm_image = cv2.cvtColor(face_area, cv2.COLOR_BGR2RGB).astype(np.float32)
    norm_image = cv2.resize(norm_image, (112, 112))
    return norm_image

def get_norm_face(image, landmarks):
    """Landmark-align *image* and return the 112x112 aligned face crop."""
    aligned = preprocess(image, landmarks)
    # preprocess already emits 112x112; the resize is kept as a cheap
    # shape guard so downstream always sees the expected size.
    return cv2.resize(aligned, (112, 112))
 

if __name__ == '__main__':
    # 1:N face verification demo: embed the single best face from the ID
    # image (gallery) and compare it against every face found in the
    # life image (probes) by cosine similarity.
    det_face_model_path = r'models/det_face_mnn_1.0.0_v0.0.2.mnn'
    reg_face_id_model_path = r'models/cls_face_mnn_1.0.0_v0.0.2.mnn'

    id_image_path = r'input/IMG_2099.jpeg'
    life_image_path = r'input/1.jpg'

    face_det_thr = 0.5
    face_recognize_thr = 0.2

    face_detector = Face_Detector(det_face_model_path)
    face_recognizer = Face_Recognizer(reg_face_id_model_path)

    id_image = cv2.imread(id_image_path)
    life_image = cv2.imread(life_image_path)

    id_face_bboxes, id_face_landmarks, id_max_idx = face_detector.detect(id_image, face_det_thr)
    life_face_bboxes, life_face_landmarks, life_max_idx = face_detector.detect(life_image, face_det_thr)
    print(id_face_bboxes)
    print(life_face_bboxes)

    # Gallery: the highest-scoring ID face, landmark-aligned.
    id_norm_image = get_norm_face(id_image, id_face_landmarks[id_max_idx])
    cv2.imwrite('results/face1.jpg', id_norm_image)
    norm_images = [id_norm_image]

    # Probes: every detected face in the life image.
    for j in range(len(life_face_landmarks)):
        life_norm_image = get_norm_face(life_image, life_face_landmarks[j])
        cv2.imwrite('results/face2.jpg', life_norm_image)
        norm_images.append(life_norm_image)

    embeddings = face_recognizer.recognize(norm_images)
    # np.mat is deprecated; plain 1-D arrays are sufficient here.
    gallery_vector = np.asarray(embeddings[0], dtype=np.float32).ravel()
    gallery_norm = np.linalg.norm(gallery_vector)
    res = False
    for p in range(1, len(embeddings)):
        compare_vector = np.asarray(embeddings[p], dtype=np.float32).ravel()
        # Cosine similarity between gallery and probe embeddings.
        sim = float(np.dot(gallery_vector, compare_vector)
                    / (gallery_norm * np.linalg.norm(compare_vector)))
        if sim > face_recognize_thr:
            res = True
        # BUG FIX: the original printed the stale loop variable `j`
        # (always the last life-face index); report the probe index
        # actually being compared.
        print('sim {} : {}'.format(p - 1, sim))
    # Report the overall decision (original computed `res` but never used it).
    print('match: {}'.format(res))