1925c5d8 by jiangwenqiang

bug fix: report abnormal face bounding boxes in the result dict, adjust MNN output handling in Abnormal_Face, and add timing/debug output to the test script

1 parent 4ddf16e9
This diff could not be displayed because it is too large.
This diff could not be displayed because it is too large.
@@ -112,14 +112,21 @@ class InitModel(object):
 
         id_face_bboxes, id_face_landmarks, id_max_idx = self.face_detector.detect(id_rgb_image, self.face_det_thr)
         life_face_bboxes, life_face_landmarks, life_max_idx = self.face_detector.detect(life_rgb_image, self.face_det_thr)
 
         if compare_type != 0 and self.abnormal_face:
             new_life_face_bboxes = []
-            print('1'*100)
+            abnormal_bboxes = []
+            is_abnormal = False
             for box_idx, face_bbox in enumerate(life_face_bboxes):
                 abnormal_pre = self.abnormal_recognizer.reg(life_rgb_image, face_bbox)
                 if abnormal_pre == 1:
                     new_life_face_bboxes.append(face_bbox)
+                else:
+                    abnormal_bboxes.append([face_bbox])
+                    is_abnormal = True
+
+            if is_abnormal:
+                result_dict['abnormal_bboxes'] = abnormal_bboxes
 
             life_face_bboxes = new_life_face_bboxes
 
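Note: the new branch keeps only the boxes the abnormal recognizer scores as 1 and surfaces the rest under `abnormal_bboxes`. A minimal standalone sketch of that filtering step, independent of the service classes (`classify_face` is a hypothetical stand-in for `Abnormal_Face.reg`):

```python
def split_bboxes(bboxes, classify_face):
    # Keep boxes classified as normal (score 1); collect the rest separately.
    normal, abnormal = [], []
    for bbox in bboxes:
        if classify_face(bbox) == 1:
            normal.append(bbox)
        else:
            abnormal.append([bbox])  # the diff wraps each abnormal bbox in its own list
    return normal, abnormal

# Toy classifier: treat very narrow boxes as abnormal (illustration only).
result_dict = {}
boxes = [[141, 94, 361, 380], [42, 133, 56, 156]]
normal, abnormal = split_bboxes(boxes, lambda b: 1 if (b[2] - b[0]) > 50 else 0)
if abnormal:
    result_dict['abnormal_bboxes'] = abnormal
print(normal, result_dict)
```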
@@ -210,7 +217,10 @@ if __name__ == '__main__':
     pre_pos = 0.01
     pre_all = 0.01
     positive_num = 0.01
-    for image_list_txt_line in image_list_txt_lines:
+    total_cost = 0
+    for image_index, image_list_txt_line in enumerate(image_list_txt_lines):
+        if image_index >= 10:
+            break
         arr = image_list_txt_line.strip().split(' ')
         label = arr[-1]
 
@@ -229,16 +239,25 @@ if __name__ == '__main__':
         life_image_name = life_image_name[2:]
         life_image_dir = life_image_name_arr[-2]
         life_image_path = os.path.join(image_dir, life_image_dir, life_image_name)
 
+        id_image_path = r'input/0003/0.jpg'
+        life_image_path = r'input/0003/1.jpg'
+
         id_image = cv2.imread(id_image_path)
         life_image = cv2.imread(life_image_path)
 
         r, id_image_str = cv2.imencode('.jpg', id_image)
         id_base64_image = base64.b64encode(id_image_str)
 
+        with open('base64_0.txt', 'w') as f:
+            f.write(str(id_base64_image))
+
         r, life_image_str = cv2.imencode('.jpg', life_image)
         life_base64_image = base64.b64encode(life_image_str)
 
+        with open('base64_1.txt', 'w') as f:
+            f.write(str(life_base64_image))
+
         st = time.time()
         result_dict = {}
         if format_type == 0:
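Note: the test script now dumps both request images as base64 text (`base64_0.txt`, `base64_1.txt`). Because it writes `str(base64.b64encode(...))`, each file contains a Python bytes repr of the form `b'...'`. A hedged sketch of reading such a dump back into an image, assuming exactly that write format:

```python
import base64

import cv2
import numpy as np

with open('base64_0.txt', 'r') as f:
    text = f.read().strip()

# str(b64encode(...)) stores "b'...'"; strip that wrapper before decoding.
if text.startswith("b'") and text.endswith("'"):
    text = text[2:-1]

jpg_bytes = base64.b64decode(text)
image = cv2.imdecode(np.frombuffer(jpg_bytes, dtype=np.uint8), cv2.IMREAD_COLOR)
print('decoded' if image is not None else 'decode failed',
      None if image is None else image.shape)
```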
@@ -247,9 +266,9 @@ if __name__ == '__main__':
             result_dict = main(id_image, life_image, compare_type)
         print(result_dict)
         et = time.time()
+        total_cost = total_cost + (et - st)
         print('total time cost:{}'.format(round((et-st), 2)))
 
-        print(label)
         res = result_dict['match']
         if res:
             if label == '1':
@@ -259,7 +278,8 @@ if __name__ == '__main__':
         else:
             if label == '0':
                 hit += 1
 
+    print('average time cost:{}'.format(total_cost / 10))
     print('precision:{}'.format(hit_pos/pre_pos))
     print('recall:{}'.format(hit_pos/positive_num))
     print('accuracy:{}'.format(hit/pre_all))
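Note: the average divides by a hard-coded 10, which matches the `image_index >= 10` cap but silently misreports if fewer than 10 lines are processed. A hedged sketch of the same timing loop that divides by the actual count instead (`process` is a hypothetical stand-in for the per-image request/`main()` call):

```python
import time

def timed_average(process, items, limit=10):
    """Run `process` on at most `limit` items and return the mean wall-clock cost."""
    total_cost, processed = 0.0, 0
    for index, item in enumerate(items):
        if index >= limit:
            break
        start = time.time()
        process(item)
        total_cost += time.time() - start
        processed += 1
    return total_cost / max(processed, 1)

# Toy usage: three fake "requests" that each sleep 10 ms.
print(timed_average(lambda _: time.sleep(0.01), range(3)))
```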
This file is too large to display.
@@ -26,8 +26,8 @@ class Abnormal_Face(object):
         self.interpreter.runSession(self.session)
         output_tensor = self.interpreter.getSessionOutput(self.session)
         output_data = output_tensor.getData()
-        output_data = output_data[0]
-        output_data = output_data.tolist()
+        # output_data = output_data[0]
+        # output_data = output_data.tolist()
         predict_res = output_data.index(max(output_data))
 
         return predict_res
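Note: dropping the `[0]` indexing and `.tolist()` means `output_data.index(max(output_data))` now assumes `getData()` already returns a flat sequence. A shape-agnostic sketch of the same argmax, in case the output comes back nested on other MNN builds (an assumption, not part of this commit):

```python
import numpy as np

def argmax_prediction(output_data):
    # Flatten whatever the runtime returned (tuple, list, or nested array)
    # and take the index of the largest score.
    scores = np.asarray(output_data, dtype=np.float32).reshape(-1)
    return int(scores.argmax())

print(argmax_prediction((0.1, 0.7, 0.2)))    # -> 1
print(argmax_prediction([[0.1, 0.7, 0.2]]))  # -> 1
```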
@@ -11,12 +11,47 @@ class Face_Recognizer(object):
         self.reg_input_tensor = self.reg_interpreter.getSessionInput(self.reg_session)
         logging.info('******** Success Init Face ID ********')
 
+    # def recognize(self, imgs):
+    #
+    #     new_imgs = []
+    #     for i in range(len(imgs)):
+    #         img = imgs[i]
+    #         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    #         img = (img / 255. - 0.5) / 0.5
+    #         img = img.transpose(2, 0, 1)
+    #         img = np.expand_dims(img, axis=0)
+    #
+    #         new_imgs.append(img)
+    #
+    #     new_imgs = np.array(new_imgs)
+    #     new_imgs = new_imgs.astype(np.float32)
+    #
+    #     self.reg_interpreter.resizeTensor(self.reg_input_tensor, (len(imgs), 3, 112, 112))
+    #     self.reg_interpreter.resizeSession(self.reg_session)
+    #
+    #     input_tensor = MNN.Tensor((len(imgs), 3, 112, 112), MNN.Halide_Type_Float, new_imgs, MNN.Tensor_DimensionType_Caffe)
+    #     self.reg_input_tensor.copyFrom(input_tensor)
+    #     self.reg_interpreter.runSession(self.reg_session)
+    #     output_tensor = self.reg_interpreter.getSessionOutput(self.reg_session, 'output0')
+    #     output = output_tensor.getData()
+    #
+    #     # feats.append(output[0])
+    #     # feats.append(output)
+    #
+    #     embeddings = list()
+    #     num = int(len(output)/512)
+    #     for i in range(num):
+    #         if i < num:
+    #             embeddings.append(output[i*512:(i+1)*512])
+    #
+    #     feats_np = np.array(embeddings)
+    #     return feats_np
+
     def recognize(self, imgs):
 
         feats = []
         for i in range(len(imgs)):
             img = imgs[i]
-
             img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
             img = img.astype(np.float)
             img = (img / 255. - 0.5) / 0.5
@@ -32,6 +67,5 @@ class Face_Recognizer(object):
 
             # feats.append(output[0])
             feats.append(output)
-
         feats_np = np.array(feats)
         return feats_np
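Note: the commented-out batch `recognize` slices a flat MNN output of length `num * 512` into per-face 512-d embeddings with an explicit loop (whose `if i < num:` guard is always true). A hedged sketch of the same split done with a single `reshape`; the embedding size 512 is taken from that commented code:

```python
import numpy as np

def split_embeddings(flat_output, dim=512):
    """Cut a flat output of length n*dim into n embeddings of length dim."""
    flat = np.asarray(flat_output, dtype=np.float32)
    num = flat.size // dim
    return flat[:num * dim].reshape(num, dim)

print(split_embeddings(np.arange(1024)).shape)  # -> (2, 512)
```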
1 {"code": 2000, "id_face_bboxes": [[141, 94, 361, 380], [42, 133, 56, 156]], "id_face_landmarks": [[[196, 200], [290, 204], [235, 246], [202, 304], [277, 308]], [[51, 142], [53, 142], [54, 147], [50, 151], [52, 152]]], "life_face_bboxes": [[832, 187, 1032, 383]], "life_face_landmarks": [[[890, 261], [971, 258], [937, 294], [911, 339], [968, 336]]], "id_index": 0, "life_index": 0, "sim": 0.3722387966977346, "match": true}
...\ No newline at end of file ...\ No newline at end of file
1 {"code": 2000, "id_face_bboxes": [[572, 142, 713, 305]], "id_face_landmarks": [[[610, 206], [681, 207], [645, 237], [617, 268], [666, 269]]], "life_face_bboxes": [[476, 125, 606, 292]], "life_face_landmarks": [[[509, 191], [572, 192], [537, 219], [514, 253], [560, 253]]], "match": false}
...\ No newline at end of file ...\ No newline at end of file
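Note: the result JSON carries a `sim` score and a `match` flag, but this diff does not show how `sim` is computed. For reference only, a common choice for comparing two 512-d face embeddings is cosine similarity with a fixed decision threshold; the sketch below is an assumption, not the project's `main()`, and `SIM_THRESHOLD` is hypothetical:

```python
import numpy as np

def cosine_similarity(a, b, eps=1e-8):
    a = np.asarray(a, dtype=np.float32).reshape(-1)
    b = np.asarray(b, dtype=np.float32).reshape(-1)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + eps))

SIM_THRESHOLD = 0.3  # hypothetical; the real threshold is not shown in this commit

rng = np.random.default_rng(0)
id_feat, life_feat = rng.normal(size=512), rng.normal(size=512)
sim = cosine_similarity(id_feat, life_feat)
print({'sim': sim, 'match': sim >= SIM_THRESHOLD})
```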