Merge branch 'feature/smart_dda' into feature/0918

Showing 1 changed file with 464 additions and 0 deletions
import os
import re
import time
import json
import shutil
import base64
import signal
import requests
import traceback
from PIL import Image
from datetime import datetime
from django.core.management import BaseCommand
from multiprocessing import Process, Queue
from openpyxl import load_workbook, Workbook

from settings import conf
from common.mixins import LoggerMixin
from common.tools.pdf_to_img import PDFHandler
from apps.doc import consts
from apps.doc.exceptions import OCR1Exception, OCR2Exception, LTGTException
from apps.doc.ocr.wb import BSWorkbook


class TIFFHandler:

    def __init__(self, path, img_save_path):
        self.path = path
        self.img_save_path = img_save_path
        self.img_path_list = []

    def extract_image(self):
        os.makedirs(self.img_save_path, exist_ok=True)
        tiff = Image.open(self.path)
        tiff.load()

        for i in range(tiff.n_frames):
            try:
                save_path = os.path.join(self.img_save_path, 'page_{0}.jpeg'.format(i))
                tiff.seek(i)
                tiff.save(save_path)
                self.img_path_list.append(save_path)
            except EOFError:
                break


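# TIFFHandler exposes the same extract_image()/img_path_list interface that
# pdf_process relies on for PDFHandler, so tif_process can feed the shared OCR flow.
# Usage sketch (file and output paths below are hypothetical):
#
#     handler = TIFFHandler('/data/input/scan.tif', '/data/output/scan_pages')
#     handler.extract_image()
#     handler.img_path_list  # ['/data/output/scan_pages/page_0.jpeg', ...]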
class Command(BaseCommand, LoggerMixin):

    def __init__(self):
        super().__init__()
        self.log_base = '[folder dda process]'
        # file-processing switch
        self.switch = True
        self.DATE_KEY = 'date'
        self.CLASSIFY_KEY = 'classify'
        self.RESULT_KEY = 'result'
        self.daily_wb_name = 'Output_{0}.xlsx'
        self.short_sleep_time = 10
        self.long_sleep_time = 3600
        # polling sleep time
        self.sleep_time = float(conf.SLEEP_SECOND_FOLDER)
        # input folders
        self.input_dirs = conf.get_namespace('DDA_DIR_')
        # separate folder names
        self.seperate_map = {
            consts.IC_CLASSIFY: 'IDCard',
            consts.BC_CLASSIFY: 'BankCard',
            consts.PP_CLASSIFY: 'Passport',
            consts.EEP_CLASSIFY: 'EntryPermit',
        }
        self.field_map = {
            consts.IC_CLASSIFY: (consts.IC_CN_NAME, '有效期限', consts.IC_FIELD_ORDER_3, consts.IC_FIELD_ORDER_2),
            consts.BC_CLASSIFY: (consts.BC_CN_NAME, None, None, consts.BC_FIELD_ORDER_2),
            consts.PP_CLASSIFY: (consts.PP_CN_NAME, None, None, consts.PP_FIELD_ORDER),
            consts.EEP_CLASSIFY: (consts.EEP_CN_NAME, None, None, consts.EEP_FIELD_ORDER),
        }
        # OCR endpoints
        self.ocr_url = conf.OCR_URL_FOLDER
        self.ocr_url_2 = conf.OCR2_URL_FOLDER
        # self.ocr_url_4 = conf.IC_URL
        # graceful-exit signal: SIGTERM (15)
        signal.signal(signal.SIGTERM, self.signal_handler)

    def signal_handler(self, sig, frame):
        self.switch = False  # stop processing files

    def license1_process(self, ocr_data, all_res, classify):
        # classify: '0' ID card, '1' residence permit
        license_data = ocr_data.get('data', [])
        if not license_data:
            return
        if classify == consts.IC_CLASSIFY:
            for id_card_dict in license_data:
                try:
                    id_card_dict.pop('base64_img')
                except Exception as e:
                    continue
        all_res.extend(license_data)

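    # license1_process assumes an ocr_data payload of roughly this shape (inferred
    # from the .get() calls above; the field names inside 'data' are illustrative only):
    #
    #     {
    #         'classify': consts.IC_CLASSIFY,
    #         'data': [
    #             {'<field>': '<value>', 'base64_img': '<stripped before output>'},
    #         ],
    #     }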
    def license2_process(self, ocr_data, all_res, classify, img_path):
        pid, _, _, _, _, _ = consts.LICENSE_CLASSIFY_MAPPING.get(classify)
        file_data = ocr_data.get('section_img')
        if file_data is None:
            with open(img_path, 'rb') as f:
                base64_data = base64.b64encode(f.read())
                # decode to get the base64 string
                file_data = base64_data.decode()
        json_data_2 = {
            "pid": str(pid),
            "filedata": file_data
        }

        for times in range(consts.RETRY_TIMES):
            try:
                start_time = time.time()
                ocr_2_response = requests.post(self.ocr_url_2, data=json_data_2)
                if ocr_2_response.status_code != 200:
                    raise OCR2Exception('ocr_2 status code: {0}'.format(ocr_2_response.status_code))
            except Exception as e:
                self.folder_log.warn(
                    '{0} [ocr_2 failed] [times={1}] [img_path={2}] [error={3}]'.format(
                        self.log_base, times, img_path, traceback.format_exc()))
            else:
                ocr_res_2 = json.loads(ocr_2_response.text)
                end_time = time.time()
                speed_time = int(end_time - start_time)
                self.folder_log.info(
                    '{0} [ocr_2 success] [img={1}] [speed_time={2}]'.format(
                        self.log_base, img_path, speed_time))

                if ocr_res_2.get('ErrorCode') in consts.SUCCESS_CODE_SET:
                    if pid == consts.BC_PID:
                        all_res.append(ocr_res_2)
                    else:
                        # business licence and similar documents
                        for result_dict in ocr_res_2.get('ResultList', []):
                            res_dict = {}
                            for field_dict in result_dict.get('FieldList', []):
                                res_dict[field_dict.get('chn_key', '')] = field_dict.get('value', '')
                            all_res.append(res_dict)
                break

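    # license2_process posts form data shaped as {"pid": "<licence pid>", "filedata": "<base64 image>"}
    # and, for non-bank-card pids, flattens the assumed response layout
    # ResultList -> FieldList -> {chn_key, value} into one dict per result. For example,
    # assuming ErrorCode 0 is in consts.SUCCESS_CODE_SET:
    #
    #     {'ErrorCode': 0, 'ResultList': [{'FieldList': [{'chn_key': '注册号', 'value': '...'}]}]}
    #     becomes {'注册号': '...'}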
    @staticmethod
    def parse_img_path(img_path):
        # 'page_{0}_img_{1}.{2}'.format(pno, img_index, ext)
        img_name, _ = os.path.splitext(os.path.basename(img_path))
        if re.match(r'page_\d+_img_\d+', img_name):
            part_list = img_name.split('_')
            return img_name, int(part_list[1])+1, int(part_list[3])+1
        else:
            return img_name, 1, 1

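    # parse_img_path examples (1-based page/image indices derived from the 0-based
    # counters embedded in names produced by the PDF extraction):
    #
    #     parse_img_path('/tmp/page_0_img_2.png')  # -> ('page_0_img_2', 1, 3)
    #     parse_img_path('/tmp/scan.jpeg')         # -> ('scan', 1, 1)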
    @staticmethod
    def get_path(name, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir):
        time_stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        new_name = '{0}_{1}'.format(time_stamp, name)
        img_save_path = os.path.join(img_output_dir, new_name)
        pdf_save_path = os.path.join(pdf_output_dir, new_name)
        excel_name = '{0}.xlsx'.format(os.path.splitext(new_name)[0])
        excel_path = os.path.join(wb_output_dir, excel_name)
        seperate_path = None if seperate_dir is None else os.path.join(seperate_dir, new_name)
        return img_save_path, excel_path, pdf_save_path, seperate_path

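    # get_path example: for name='scan.pdf' processed at 2020-09-18 10:30:00 the
    # derived paths would be (directories are whatever the caller passes in):
    #
    #     img_save_path = '<img_output_dir>/2020-09-18_10:30:00_scan.pdf'
    #     excel_path    = '<wb_output_dir>/2020-09-18_10:30:00_scan.xlsx'
    #     pdf_save_path = '<pdf_output_dir>/2020-09-18_10:30:00_scan.pdf'
    #     seperate_path = '<seperate_dir>/2020-09-18_10:30:00_scan.pdf' or None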
    def res_process(self, all_res, excel_path, classify):
        try:
            wb = BSWorkbook(set(), set(), set(), set(), set())
            sheet_name, key_field, side_field_order, src_field_order = self.field_map.get(classify)
            ws = wb.create_sheet(sheet_name)
            for res in all_res:
                if key_field is not None and key_field in res:
                    field_order = side_field_order
                else:
                    field_order = src_field_order
                for search_field, write_field in field_order:
                    field_value = res.get(search_field, '')
                    if isinstance(field_value, list):
                        ws.append((write_field, *field_value))
                    else:
                        ws.append((write_field, field_value))
                ws.append((None,))
            wb.remove_base_sheet()
            wb.save(excel_path)
        except Exception as e:
            self.folder_log.error('{0} [wb build error] [path={1}] [error={2}]'.format(
                self.log_base, excel_path, traceback.format_exc()))

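    # res_process writes one (label, value) row per (search_field, write_field) pair in
    # the matching field-order constant, followed by an empty separator row per record,
    # so each OCR result becomes a vertical block in the per-file workbook.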
    def basename(self, path):
        # A basename() variant which first strips the trailing slash, if present.
        # Thus we always get the last component of the path, even for directories.
        sep = os.path.sep + (os.path.altsep or '')
        return os.path.basename(path.rstrip(sep))

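    # basename examples:
    #
    #     self.basename('/data/input/scan.jpg')  # -> 'scan.jpg'
    #     self.basename('/data/input/')          # -> 'input'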
    def ocr_process(self, img_path, classify, all_res, seperate_dir):
        if os.path.exists(img_path):
            # TODO: validate the image
            with open(img_path, 'rb') as f:
                base64_data = base64.b64encode(f.read())
                # decode to get the base64 string
                file_data = base64_data.decode()
            json_data = {
                "file": file_data,
            }
            if seperate_dir is None:
                json_data["classify"] = classify

            for times in range(consts.RETRY_TIMES):
                try:
                    start_time = time.time()
                    ocr_response = requests.post(self.ocr_url, json=json_data)
                    if ocr_response.status_code != 200:
                        raise OCR1Exception('{0} ocr status code: {1}'.format(self.log_base, ocr_response.status_code))
                except Exception as e:
                    self.folder_log.warn('{0} [ocr failed] [times={1}] [img_path={2}] [error={3}]'.format(
                        self.log_base, times, img_path, traceback.format_exc()))
                else:
                    ocr_res = ocr_response.json()
                    end_time = time.time()
                    speed_time = int(end_time - start_time)
                    self.folder_log.info('{0} [ocr success] [img={1}] [res={2}] [speed_time={3}]'.format(
                        self.log_base, img_path, ocr_res, speed_time))

                    if isinstance(ocr_res, dict):
                        if ocr_res.get('code') == 1:
                            data_list = ocr_res.get('data', [])
                            if isinstance(data_list, list):
                                for ocr_data in data_list:
                                    if ocr_data.get('classify') == classify:
                                        if seperate_dir is not None:
                                            os.makedirs(seperate_dir, exist_ok=True)
                                            real_dst = os.path.join(seperate_dir, self.basename(img_path))
                                            if not os.path.exists(real_dst):
                                                shutil.move(img_path, seperate_dir)
                                        if classify in consts.LICENSE_CLASSIFY_SET_1:
                                            self.license1_process(ocr_data, all_res, classify)
                                        elif classify in consts.LICENSE_CLASSIFY_SET_2:
                                            self.license2_process(ocr_data, all_res, classify, img_path)
                    break
            else:
                self.folder_log.warn('{0} [ocr failed] [img_path={1}]'.format(self.log_base, img_path))

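    # ocr_process assumes the classification service accepts JSON like
    # {"file": "<base64 image>", "classify": <int, omitted for combined folders>} and
    # replies with {"code": 1, "data": [{"classify": <int>, ...}, ...]}; only entries
    # whose classify matches this folder are forwarded to license1/2_process.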
    def images_process(self, img_path_list, classify, excel_path, seperate_dir):
        all_res = []
        for img_path in img_path_list:
            self.ocr_process(img_path, classify, all_res, seperate_dir)
        # if len(all_res) > 0:
        self.res_process(all_res, excel_path, classify)
        return all_res

    def pdf_process(self, name, path, classify, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir):
        if os.path.exists(path):
            rebuild_res = None
            try:
                img_save_path, excel_path, pdf_save_path, seperate_path = self.get_path(
                    name, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir)
                self.folder_log.info('{0} [pdf to img start] [path={1}]'.format(self.log_base, path))
                pdf_handler = PDFHandler(path, img_save_path)
                pdf_handler.extract_image()
                self.folder_log.info('{0} [pdf to img end] [path={1}]'.format(self.log_base, path))
            except Exception as e:
                self.folder_log.error('{0} [pdf to img error] [path={1}] [error={2}]'.format(
                    self.log_base, path, traceback.format_exc()))
                raise e
            else:
                rebuild_res = self.images_process(pdf_handler.img_path_list, classify, excel_path, seperate_path)
                shutil.move(path, pdf_save_path)
            return rebuild_res

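    # pdf_process, tif_process and img_process share one flow: derive the output paths,
    # split the source into page images where needed, OCR every page into a per-file
    # workbook, then archive the original under the pdf/tiff/image output folder.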
    def tif_process(self, name, path, classify, img_output_dir, wb_output_dir, tiff_output_dir, seperate_dir):
        if os.path.exists(path):
            rebuild_res = None
            try:
                img_save_path, excel_path, tiff_save_path, seperate_path = self.get_path(
                    name, img_output_dir, wb_output_dir, tiff_output_dir, seperate_dir)
                self.folder_log.info('{0} [tiff to img start] [path={1}]'.format(self.log_base, path))
                tiff_handler = TIFFHandler(path, img_save_path)
                tiff_handler.extract_image()
                self.folder_log.info('{0} [tiff to img end] [path={1}]'.format(self.log_base, path))
            except Exception as e:
                self.folder_log.error('{0} [tiff to img error] [path={1}] [error={2}]'.format(
                    self.log_base, path, traceback.format_exc()))
                raise e
            else:
                rebuild_res = self.images_process(tiff_handler.img_path_list, classify, excel_path, seperate_path)
                shutil.move(path, tiff_save_path)
            return rebuild_res

    def img_process(self, name, path, classify, wb_output_dir, img_output_dir, pdf_output_dir, seperate_dir):
        rebuild_res = None
        try:
            img_save_path, excel_path, _, seperate_path = self.get_path(
                name, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir)
        except Exception as e:
            self.folder_log.error('{0} [get path error] [path={1}] [error={2}]'.format(
                self.log_base, path, traceback.format_exc()))
        else:
            rebuild_res = self.images_process([path], classify, excel_path, seperate_path)
            shutil.move(path, img_save_path)
        return rebuild_res

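    # wb_process drains items that folder_process puts on the queue, each shaped as
    # {self.DATE_KEY: 'YYYY-MM-DD', self.CLASSIFY_KEY: <int>, self.RESULT_KEY: <dict>},
    # batching them by date and appending them to a per-day workbook Output_<date>.xlsx
    # with one sheet per document type.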
    def wb_process(self, wb_dir, result_queue):
        while self.switch:
            result_list = []
            date_str = None
            for i in range(100):
                try:
                    result = result_queue.get(block=False)
                except Exception as e:
                    time.sleep(self.short_sleep_time)
                else:
                    if date_str is None:
                        date_str = result[self.DATE_KEY]
                        result_list.append(result)
                    elif result[self.DATE_KEY] == date_str:
                        result_list.append(result)
                    else:
                        break
            if date_str is None:
                time.sleep(self.long_sleep_time)
                continue
            else:
                wb_name = self.daily_wb_name.format(date_str)
                wb_path = os.path.join(wb_dir, wb_name)
                if os.path.isfile(wb_path):
                    wb = load_workbook(wb_path)
                else:
                    wb = Workbook()
                for result in result_list:
                    try:
                        sheet_name, key_field, side_field_order, field_order = self.field_map[result[self.CLASSIFY_KEY]]
                        if key_field is not None and key_field in result[self.RESULT_KEY]:
                            head_fields = [a for a, _ in side_field_order]
                        else:
                            head_fields = [a for a, _ in field_order]
                        row = []
                        for field in head_fields:
                            row.append(result[self.RESULT_KEY].get(field))
                        if sheet_name in wb.sheetnames:
                            ws = wb[sheet_name]
                        else:
                            ws = wb.create_sheet(sheet_name)
                            ws.append(head_fields)
                        ws.append(row)
                    except Exception as e:
                        self.folder_log.info('{0} [daily wb failed] [result={1}] [error={2}]'.format(
                            self.log_base, result, traceback.format_exc()))
                wb.save(wb_path)

    def folder_process(self, input_dir, classify, is_combined, result_queue):
        while not os.path.isdir(input_dir):
            self.folder_log.info('{0} [input dir is not dir] [input_dir={1}]'.format(self.log_base, input_dir))
            if self.switch:
                time.sleep(self.sleep_time)
                continue
            else:
                return
        output_dir = os.path.join(os.path.dirname(input_dir), 'Output')
        seperate_dir = os.path.join(output_dir, self.seperate_map.get(classify, 'Unknown')) if is_combined else None
        img_output_dir = os.path.join(output_dir, 'image')
        wb_output_dir = os.path.join(output_dir, 'excel')
        pdf_output_dir = os.path.join(output_dir, 'pdf')
        tiff_output_dir = os.path.join(output_dir, 'tiff')
        failed_output_dir = os.path.join(output_dir, 'failed')
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(img_output_dir, exist_ok=True)
        os.makedirs(wb_output_dir, exist_ok=True)
        os.makedirs(pdf_output_dir, exist_ok=True)
        os.makedirs(tiff_output_dir, exist_ok=True)
        os.makedirs(failed_output_dir, exist_ok=True)
        if seperate_dir is not None:
            os.makedirs(seperate_dir, exist_ok=True)
        os_error_filename_set = set()
        while self.switch:
            # if not os.path.isdir(input_dir):
            #     self.folder_log.info('{0} [input dir is not dir] [input_dir={1}]'.format(self.log_base, input_dir))
            #     time.sleep(self.sleep_time)
            #     continue
            # 1. fetch PDF or image files from the input dir
            list_dir = os.listdir(input_dir)
            if not list_dir and len(os_error_filename_set) == 0:
                self.folder_log.info('{0} [input dir empty] [input_dir={1}]'.format(self.log_base, input_dir))
                time.sleep(self.sleep_time)
                continue
            all_file_set = set(list_dir)
            true_file_set = all_file_set - os_error_filename_set
            if len(true_file_set) == 0 and len(os_error_filename_set) > 0:
                true_file_set.add(os_error_filename_set.pop())
            for name in true_file_set:
                path = os.path.join(input_dir, name)
                try:
                    if os.path.isfile(path):
                        self.folder_log.info('{0} [file start] [path={1}]'.format(self.log_base, path))
                        if name.endswith('.pdf') or name.endswith('.PDF'):
                            result = self.pdf_process(name, path, classify, img_output_dir, wb_output_dir,
                                                      pdf_output_dir, seperate_dir)
                        elif name.endswith('.tif') or name.endswith('.TIF'):
                            result = self.tif_process(name, path, classify, img_output_dir, wb_output_dir,
                                                      tiff_output_dir, seperate_dir)
                        else:
                            result = self.img_process(name, path, classify, wb_output_dir, img_output_dir,
                                                      pdf_output_dir, seperate_dir)
                        self.folder_log.info('{0} [file end] [path={1}]'.format(self.log_base, path))
                    else:
                        result = None
                        self.folder_log.info('{0} [path is dir] [path={1}]'.format(self.log_base, input_dir))
                        failed_path = os.path.join(failed_output_dir, '{0}_{1}'.format(time.time(), name))
                        shutil.move(path, failed_path)
                except OSError:
                    os_error_filename_set.add(name)
                    self.folder_log.error('{0} [os error] [path={1}] [error={2}]'.format(
                        self.log_base, path, traceback.format_exc()))
                except Exception as e:
                    try:
                        self.folder_log.error('{0} [file error] [path={1}] [error={2}]'.format(
                            self.log_base, path, traceback.format_exc()))
                        failed_path = os.path.join(failed_output_dir, '{0}_{1}'.format(time.time(), name))
                        shutil.move(path, failed_path)
                    except Exception as e:
                        os_error_filename_set.add(name)
                        self.folder_log.error('{0} [file move error] [path={1}] [error={2}]'.format(
                            self.log_base, path, traceback.format_exc()))
                else:
                    if isinstance(result, dict) and len(result) > 0:
                        date_str = time.strftime("%Y-%m-%d")
                        result_queue.put(
                            {
                                self.CLASSIFY_KEY: classify,
                                self.RESULT_KEY: result,
                                self.DATE_KEY: date_str
                            }
                        )
                    elif isinstance(result, list) and len(result) > 0:
                        date_str = time.strftime("%Y-%m-%d")
                        for res in result:
                            result_queue.put(
                                {
                                    self.CLASSIFY_KEY: classify,
                                    self.RESULT_KEY: res,
                                    self.DATE_KEY: date_str
                                }
                            )

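    # handle() assumes conf.get_namespace('DDA_DIR_') yields keys that encode the
    # document classify and the "combined" flag as underscore-separated parts, e.g. a
    # hypothetical setting DDA_DIR_0_IC_1 giving key '0_IC_1' -> classify=0,
    # is_combined=True; the exact key format comes from the project settings and is an
    # assumption here.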
    def handle(self, *args, **kwargs):
        if len(self.input_dirs) == 0:
            return
        result_queue = Queue()
        process_list = []
        one_input_dir = None
        for classify_idx, input_dir in self.input_dirs.items():
            if one_input_dir is None:
                one_input_dir = input_dir
            classify = int(classify_idx.split('_')[0])
            is_combined = int(classify_idx.split('_')[2]) == 1
            process = Process(target=self.folder_process, args=(input_dir, classify, is_combined, result_queue))
            process_list.append(process)

        wb_dir = os.path.dirname(os.path.dirname(one_input_dir))
        wb_process = Process(target=self.wb_process, args=(wb_dir, result_queue,))
        process_list.append(wb_process)

        for p in process_list:
            p.start()
        for p in process_list:
            p.join()

        self.folder_log.info('{0} [stop safely]'.format(self.log_base))