# pipeline_test_super_pixel_2.py
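# Evaluates the MNN ArcFace recognizer on ID-vs-life image pairs listed in test2.txt and
# reports precision / recall / accuracy at a fixed cosine-similarity threshold.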
import os
import numpy as np
import MNN
import cv2
import logging
from skimage import transform as trans

from face_landmark import Landmark_Detector
from face_detecter import Face_Detector
from face_id import Face_Recognizer


def preprocess(image, landmarks):
    """Align a face to the standard 112x112 ArcFace crop using a similarity transform."""
    # Reference 5-point template (eye centers, nose tip, mouth corners) for a 112x112 crop.
    src = np.array([[38.2946, 51.6963],
                    [73.5318, 51.6963],
                    [56.0252, 71.7366],
                    [41.5493, 92.3655],
                    [70.7299, 92.3655]], dtype=np.float32)

    dst = np.array(landmarks, dtype=np.float32)
    # Estimate the similarity transform that maps the detected landmarks onto the template.
    tform = trans.SimilarityTransform()
    tform.estimate(dst, src)
    M = tform.params[0:2, :]
    warped = cv2.warpAffine(image, M, (112, 112), borderValue=0.0)
    return warped

def get_face(image, bbox):
    """Crop the [x1, y1, x2, y2] box, convert BGR -> RGB and resize to 112x112."""
    face_area = image[bbox[1]:bbox[3], bbox[0]:bbox[2]]
    norm_image = cv2.cvtColor(face_area, cv2.COLOR_BGR2RGB).astype(np.float32)
    norm_image = cv2.resize(norm_image, (112, 112))
    return norm_image

def get_norm_face(image, landmarks):
    """Return the 112x112 ArcFace-aligned crop for the given 5-point landmarks."""
    return preprocess(image, landmarks)

def landmarks_process(all_landmarks, all_bboxes):
    """Map full-image landmark coordinates into the 112x112 crop produced by get_face."""
    processed_landmarks = []
    for l, landmarks in enumerate(all_landmarks):
        bbox = all_bboxes[l]
        # bbox is [x1, y1, x2, y2]: width runs along x, height along y.
        face_w, face_h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        w_r, h_r = face_w / 112, face_h / 112
        # Shift each of the five points into bbox coordinates, then scale to the 112x112 crop.
        points = [((x - bbox[0]) / w_r, (y - bbox[1]) / h_r) for x, y in landmarks]
        processed_landmarks.append(points)

    return processed_landmarks
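
# Note: the helpers above (get_face, get_norm_face, landmarks_process) belong to the full
# detect-crop-align pipeline; this script reads already-prepared crops (id.jpg, life/*.jpg) from disk.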


# ArcFace (ms1mv3_r18) recognition model exported to MNN.
reg_face_id_model_path = r'/home/jwq/PycharmProjects/situ/src/face_id/insightface/recognition/arcface_torch/work_dirs/ms1mv3_r18/ms1mv3_r18_0.96200/ms1mv3_r18.mnn'

image_dir = r'/data2/face_id/situ_other/pipeline_test/'
image_list_txt_path = r'/data2/face_id/situ_other/test2.txt'
save_dir = r'/home/jwq/PycharmProjects/situ/src/superpixel/Face-Super-Resolution/results'

face_recognizer = Face_Recognizer(reg_face_id_model_path)

with open(image_list_txt_path, 'r') as image_list_txt:
    image_list_txt_lines = image_list_txt.readlines()

# Counters start at 0.01 instead of 0 so the metric divisions below never hit zero.
hit = 0.01           # correct predictions (TP + TN)
hit_pos = 0.01       # true positives
pre_pos = 0.01       # predicted positives
pre_all = 0.01       # all evaluated pairs
positive_num = 0.01  # ground-truth positive pairs
for line_idx, image_list_txt_line in enumerate(image_list_txt_lines):
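    # The last token on each line is the ground-truth label: '1' for a positive pair, '0' for a negative one.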
    arr = image_list_txt_line.strip().split(' ')
    label = arr[-1]
    
    pre_all += 1
    if label == '1':
        positive_num += 1    

    # Each pair lives in save_dir/<line_idx>/: the ID photo as id.jpg, candidate shots under life/.
    sub_dir = os.path.join(save_dir, str(line_idx))
    id_image_path = os.path.join(sub_dir, 'id.jpg')

    # Cosine-similarity threshold for declaring two faces the same identity.
    face_recognize_thr = 0.10
    
    # The ID (gallery) image goes first so its embedding ends up at index 0.
    id_norm_image = cv2.imread(id_image_path)
    norm_images = [id_norm_image]

    sub_life_dir = os.path.join(sub_dir, 'life')
    if not os.path.exists(sub_life_dir):
        continue

    # Every image under life/ is a probe to compare against the ID photo.
    for life_name in os.listdir(sub_life_dir):
        life_path = os.path.join(sub_life_dir, life_name)
        life_norm_image = cv2.imread(life_path)
        norm_images.append(life_norm_image)

    # Embed all crops in one batch; embeddings[0] is the ID face, the rest are life shots.
    embeddings = face_recognizer.recognize(norm_images)
    gallery_vector = np.asarray(embeddings[0], dtype=np.float32).ravel()
    res = False
    for p in range(1, len(embeddings)):
        compare_vector = np.asarray(embeddings[p], dtype=np.float32).ravel()

        # Cosine similarity between the gallery embedding and this life embedding.
        norm = np.linalg.norm(gallery_vector) * np.linalg.norm(compare_vector)
        sim = float(np.dot(gallery_vector, compare_vector) / norm)

        if sim > face_recognize_thr:
            res = True
        print('label: {}, sim {}: {}'.format(label, p, sim))
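    # Tally the decision: hit counts correct predictions, hit_pos true positives, pre_pos predicted positives.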
   
    if res:
        if label == '1':
            hit_pos += 1  
            hit += 1        
        pre_pos += 1
    else:
        if label == '0':
            hit += 1

# precision = TP / predicted positives, recall = TP / labeled positives, accuracy = correct / all pairs.
print('precision: {}'.format(hit_pos / pre_pos))
print('recall: {}'.format(hit_pos / positive_num))
print('accuracy: {}'.format(hit / pre_all))