1925c5d8 by jiangwenqiang

bug fix

1 parent 4ddf16e9
This diff could not be displayed because it is too large.
......@@ -112,14 +112,21 @@ class InitModel(object):
        id_face_bboxes, id_face_landmarks, id_max_idx = self.face_detector.detect(id_rgb_image, self.face_det_thr)
        life_face_bboxes, life_face_landmarks, life_max_idx = self.face_detector.detect(life_rgb_image, self.face_det_thr)
        if compare_type != 0 and self.abnormal_face:
            # keep only the live-frame faces the abnormal-face classifier accepts;
            # rejected boxes are reported back through result_dict['abnormal_bboxes']
            new_life_face_bboxes = []
            abnormal_bboxes = []
            is_abnormal = False
            for box_idx, face_bbox in enumerate(life_face_bboxes):
                abnormal_pre = self.abnormal_recognizer.reg(life_rgb_image, face_bbox)
                if abnormal_pre == 1:
                    new_life_face_bboxes.append(face_bbox)
                else:
                    abnormal_bboxes.append([face_bbox])
                    is_abnormal = True
            if is_abnormal:
                result_dict['abnormal_bboxes'] = abnormal_bboxes
            life_face_bboxes = new_life_face_bboxes
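Note on the block above: it keeps only the live-frame faces the abnormal-face classifier accepts (reg() returning 1) and reports the rejected boxes through result_dict['abnormal_bboxes']. A minimal standalone sketch of the same keep/reject split, with a stubbed recognizer (the function name and bbox handling here are illustrative, not part of the project):

def split_by_abnormal(recognizer, image, bboxes):
    # sketch only: mirrors the filtering above with an injected classifier stub
    kept, rejected = [], []
    for bbox in bboxes:
        # reg() is assumed to return 1 for a normal face, any other value for an abnormal one
        if recognizer.reg(image, bbox) == 1:
            kept.append(bbox)
        else:
            rejected.append([bbox])  # same single-element nesting as abnormal_bboxes above
    return kept, rejected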
......@@ -210,7 +217,10 @@ if __name__ == '__main__':
    pre_pos = 0.01
    pre_all = 0.01
    positive_num = 0.01
    for image_list_txt_line in image_list_txt_lines:
    total_cost = 0
    # the enumerate loop below replaces the plain loop above (shown by the diff) and caps this debug run at 10 pairs
    for image_index, image_list_txt_line in enumerate(image_list_txt_lines):
        if image_index >= 10:
            break
        arr = image_list_txt_line.strip().split(' ')
        label = arr[-1]
......@@ -229,16 +239,25 @@ if __name__ == '__main__':
        life_image_name = life_image_name[2:]
        life_image_dir = life_image_name_arr[-2]
        life_image_path = os.path.join(image_dir, life_image_dir, life_image_name)
        # fixed test pair below overrides the paths parsed from the image list
        id_image_path = r'input/0003/0.jpg'
        life_image_path = r'input/0003/1.jpg'
        id_image = cv2.imread(id_image_path)
        life_image = cv2.imread(life_image_path)
        # encode both images to in-memory JPEGs and dump them as base64 text
        r, id_image_str = cv2.imencode('.jpg', id_image)
        id_base64_image = base64.b64encode(id_image_str)
        with open('base64_0.txt', 'w') as f:
            f.write(id_base64_image.decode('ascii'))  # decode so the file holds plain base64, not the b'...' repr
        r, life_image_str = cv2.imencode('.jpg', life_image)
        life_base64_image = base64.b64encode(life_image_str)
        with open('base64_1.txt', 'w') as f:
            f.write(life_base64_image.decode('ascii'))
        st = time.time()
        result_dict = {}
        if format_type == 0:
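For reference, the imencode/b64encode pair above can be round-tripped as below; decoding the base64 bytes to text before writing keeps the dump free of the b'...' repr (paths and file handling here are illustrative):

import base64
import cv2
import numpy as np

img = cv2.imread('input/0003/0.jpg')                    # BGR image, same test path as above
ok, jpg_buf = cv2.imencode('.jpg', img)                 # in-memory JPEG as a uint8 array
b64_text = base64.b64encode(jpg_buf).decode('ascii')    # plain base64 text

raw = base64.b64decode(b64_text)                        # round trip back to an image
restored = cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)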
......@@ -247,9 +266,9 @@ if __name__ == '__main__':
            result_dict = main(id_image, life_image, compare_type)
        print(result_dict)
        et = time.time()
        total_cost = total_cost + (et - st)
        print('total time cost:{}'.format(round((et-st), 2)))
        print(label)
        res = result_dict['match']
        if res:
            if label == '1':
......@@ -259,7 +278,8 @@ if __name__ == '__main__':
        else:
            if label == '0':
                hit += 1
    print('average time cost:{}'.format(total_cost / 10))
    print('precision:{}'.format(hit_pos/pre_pos))
    print('recall:{}'.format(hit_pos/positive_num))
    print('accuracy:{}'.format(hit/pre_all))
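The 0.01 initial values above are there so none of the final divisions can hit a zero denominator on a short or single-class run; with real sample counts the bias is negligible. Assuming the counters are incremented as their names suggest (the incrementing lines fall outside this hunk), the printed quantities are the standard ones:

# precision = hit_pos / pre_pos        correct matches / pairs predicted as a match
# recall    = hit_pos / positive_num   correct matches / pairs labelled as a match
# accuracy  = hit / pre_all            correct decisions / all evaluated pairs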
......
This file is too large to display.
......@@ -26,8 +26,8 @@ class Abnormal_Face(object):
        self.interpreter.runSession(self.session)
        output_tensor = self.interpreter.getSessionOutput(self.session)
        output_data = output_tensor.getData()
        output_data = output_data[0]
        output_data = output_data.tolist()
        # output_data = output_data[0]
        # output_data = output_data.tolist()
        predict_res = output_data.index(max(output_data))
        return predict_res
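This commit comments out the output_data[0] indexing and the tolist() conversion (the diff shows both the old active lines and their new commented copies), so getData() must now return a flat Python sequence for index(max(...)) to work. A shape-agnostic arg-max that accepts either a tuple or a nested numpy array is sketched below (an optional hardening, not something this commit requires):

import numpy as np

def argmax_class(output_data):
    # ravel() flattens a (1, N) array and turns a plain tuple/list into a 1-D array,
    # so this works regardless of the exact container getData() returns
    scores = np.ravel(np.asarray(output_data))
    return int(np.argmax(scores))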
......
......@@ -11,12 +11,47 @@ class Face_Recognizer(object):
        self.reg_input_tensor = self.reg_interpreter.getSessionInput(self.reg_session)
        logging.info('******** Success Init Face ID ********')
    # def recognize(self, imgs):
    #
    #     new_imgs = []
    #     for i in range(len(imgs)):
    #         img = imgs[i]
    #         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #         img = (img / 255. - 0.5) / 0.5
    #         img = img.transpose(2, 0, 1)
    #         img = np.expand_dims(img, axis=0)
    #
    #         new_imgs.append(img)
    #
    #     new_imgs = np.array(new_imgs)
    #     new_imgs = new_imgs.astype(np.float32)
    #
    #     self.reg_interpreter.resizeTensor(self.reg_input_tensor, (len(imgs), 3, 112, 112))
    #     self.reg_interpreter.resizeSession(self.reg_session)
    #
    #     input_tensor = MNN.Tensor((len(imgs), 3, 112, 112), MNN.Halide_Type_Float, new_imgs, MNN.Tensor_DimensionType_Caffe)
    #     self.reg_input_tensor.copyFrom(input_tensor)
    #     self.reg_interpreter.runSession(self.reg_session)
    #     output_tensor = self.reg_interpreter.getSessionOutput(self.reg_session, 'output0')
    #     output = output_tensor.getData()
    #
    #     # feats.append(output[0])
    #     # feats.append(output)
    #
    #     embeddings = list()
    #     num = int(len(output)/512)
    #     for i in range(num):
    #         if i < num:
    #             embeddings.append(output[i*512:(i+1)*512])
    #
    #     feats_np = np.array(embeddings)
    #     return feats_np
    def recognize(self, imgs):
        feats = []
        for i in range(len(imgs)):
            img = imgs[i]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img.astype(np.float64)  # np.float was an alias of the builtin float and is removed in newer NumPy
            img = (img / 255. - 0.5) / 0.5
......@@ -32,6 +67,5 @@ class Face_Recognizer(object):
            # feats.append(output[0])
            feats.append(output)
        feats_np = np.array(feats)
        return feats_np
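The active recognize() above normalizes each crop from [0, 255] to [-1, 1] via (img / 255. - 0.5) / 0.5 and runs the crops one at a time; the commented-out variant batches them into a single (N, 3, 112, 112) tensor and then slices the flat MNN output into 512-dimensional embeddings. That slicing step is just a reshape, sketched here under the same 512-d assumption:

import numpy as np

def split_embeddings(flat_output, dim=512):
    # the batched session returns one flat sequence; cut it into (N, dim) embedding rows
    flat = np.asarray(flat_output, dtype=np.float32)
    assert flat.size % dim == 0, 'output length must be a multiple of the embedding size'
    return flat.reshape(-1, dim)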
......
{"code": 2000, "id_face_bboxes": [[141, 94, 361, 380], [42, 133, 56, 156]], "id_face_landmarks": [[[196, 200], [290, 204], [235, 246], [202, 304], [277, 308]], [[51, 142], [53, 142], [54, 147], [50, 151], [52, 152]]], "life_face_bboxes": [[832, 187, 1032, 383]], "life_face_landmarks": [[[890, 261], [971, 258], [937, 294], [911, 339], [968, 336]]], "id_index": 0, "life_index": 0, "sim": 0.3722387966977346, "match": true}
\ No newline at end of file
{"code": 2000, "id_face_bboxes": [[572, 142, 713, 305]], "id_face_landmarks": [[[610, 206], [681, 207], [645, 237], [617, 268], [666, 269]]], "life_face_bboxes": [[476, 125, 606, 292]], "life_face_landmarks": [[[509, 191], [572, 192], [537, 219], [514, 253], [560, 253]]], "match": false}
\ No newline at end of file
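The two JSON lines above are sample responses: both carry code 2000 with the detected boxes and five-point landmarks, and the matched pair additionally reports id_index, life_index and the similarity score behind match. A minimal consumer might look like this (the file name is hypothetical; field names are taken from the samples):

import json

with open('result_0.json') as f:        # hypothetical dump of the first sample above
    result = json.load(f)

if result.get('code') == 2000 and result.get('match'):
    print('match, similarity {:.3f}'.format(result.get('sim', 0.0)))
else:
    print('no match')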
......