import os
import re
import time
import json
import shutil
import base64
import signal
import requests
import traceback
from django import db
from PIL import Image
from datetime import datetime
from django.core.management import BaseCommand
from multiprocessing import Process, Queue
from openpyxl import load_workbook, Workbook
from settings import conf
from common.mixins import LoggerMixin
from common.tools.pdf_to_img import PDFHandler
from common.electronic_afc_contract.afc_contract_ocr import predict as afc_predict
from apps.doc import consts
from apps.doc.exceptions import OCR1Exception, OCR2Exception, LTGTException
from apps.doc.ocr.wb import BSWorkbook
from apps.doc.models import OfflineReport, AFCOfflineReport
from apps.doc.named_enum import OfflineFailureReason


class TIFFHandler:
    """Splits a multi-frame TIFF file into per-page JPEG images."""

    def __init__(self, path, img_save_path):
        # path: source TIFF file; img_save_path: directory for extracted pages.
        self.path = path
        self.img_save_path = img_save_path
        # Filled by extract_image() with the paths of the pages written out.
        self.img_path_list = []

    def extract_image(self):
        """Save every frame of the TIFF as page_<i>.jpeg under img_save_path.

        Stops early on EOFError, which PIL raises when seeking past the last
        frame of some malformed TIFFs.
        """
        os.makedirs(self.img_save_path, exist_ok=True)
        tiff = Image.open(self.path)
        tiff.load()
        for i in range(tiff.n_frames):
            try:
                save_path = os.path.join(self.img_save_path, 'page_{0}.jpeg'.format(i))
                tiff.seek(i)
                tiff.save(save_path)
                self.img_path_list.append(save_path)
            except EOFError:
                break


class Command(BaseCommand, LoggerMixin):
    """Long-running folder watcher for offline LTGT/AFC document OCR.

    Spawns one process per configured input directory (folder_process) plus a
    single aggregator process (wb_process) that appends per-file OCR results
    into a daily Excel workbook. NOTE(review): logging goes through
    ``self.folder_log`` — presumably provided by LoggerMixin; confirm there.
    """

    def __init__(self):
        super().__init__()
        self.log_base = '[folder ltgt process]'
        # Master switch for the worker loops; flipped off by SIGTERM so the
        # processes drain and exit gracefully.
        self.switch = True
        # Result fields whose values are monetary amounts (used by
        # BSWorkbook.ltgt_build for special formatting/parsing).
        self.amount_key_set = {'债权金额', '贷款本金', '罚息', '律师费', '案件受理费', '支付金额'}
        # classify id -> LTGT document label sent to the LTGT OCR service.
        self.ltgt_classify_mapping = {
            128: '执行裁定书',
            129: '民事判决书',
            130: '民事调解书'
        }
        # classify id -> [sheet name, ordered header fields] for the daily workbook.
        self.sheet_content = {
            128: ['执行裁定书', ['承办法院', '案号/标号', '被执行人', '债权金额', '诉讼时间']],
            129: ['民事判决书', ['承办法院', '案号/标号', '被告', '判决结果: 贷款本金', '判决结果: 罚息',
                           '判决结果: 律师费', '判决结果: 案件受理费', '诉讼时间']],
            130: ['民事调解书', ['承办法院', '案号/标号', '被告', '协议内容: 支付金额', '协议内容: 案件受理费',
                           '诉讼时间']],
        }
        # Keys of the dicts exchanged over the multiprocessing result queue.
        self.FILE_KEY = 'file'
        self.DATE_KEY = 'date'
        self.CLASSIFY_KEY = 'classify'
        self.RESULT_KEY = 'result'
        self.daily_wb_name = 'Output_{0}.xlsx'
        self.short_sleep_time = 10
        self.long_sleep_time = 3600
        # Poll interval (seconds) for the input-directory scan loop.
        self.sleep_time = float(conf.SLEEP_SECOND_FOLDER)
        # Input folders: namespace LTGT_DIR_* from settings, keyed by
        # '<classify>_<idx>_<combined-flag>' (see handle()).
        self.input_dirs = conf.get_namespace('LTGT_DIR_')
        # Output sub-folder names used when a "combined" directory must be
        # separated by detected document class.
        self.combined_map = {
            consts.IC_CLASSIFY: 'IDCard',
            consts.MVC_CLASSIFY: 'GreenBook',
            consts.CONTRACT_CLASSIFY: 'Contract',
        }
        self.field_map = {
            # classify -> (sheet_name, key_field, side_field_order, src_field_order)
            consts.VAT_CLASSIFY: (consts.VAT_CN_NAME, None, None, consts.VATS_FIELD_ORDER),
            consts.IC_CLASSIFY: (consts.IC_CN_NAME, '有效期限', consts.IC_FIELD_ORDER_3, consts.IC_FIELD_ORDER_2),
            consts.MVC_CLASSIFY: (consts.MVC_CN_NAME, '机动车登记证书编号', consts.MVC_SE_FIELD_ORDER_3_4,
                                  consts.MVC_SE_FIELD_ORDER_1_2),
        }
        # Same layout as field_map but used by the daily-workbook aggregator.
        self.field_map_2 = {
            # classify -> (sheet_name, key_field, side_field_order, src_field_order)
            consts.CONTRACT_CLASSIFY: (consts.CONTRACT_CN_NAME, None, None, consts.AFC_CON_FIELD_ORDER_LTGT),
            consts.VAT_CLASSIFY: (consts.VAT_CN_NAME, None, None, consts.VATS_FIELD_ORDER),
            consts.IC_CLASSIFY: (consts.IC_CN_NAME, '有效期限', consts.IC_FIELD_ORDER_3, consts.IC_FIELD_ORDER_2),
            consts.MVC_CLASSIFY: (consts.MVC_CN_NAME, None, None, consts.MVC_SE_FIELD_ORDER_LTGT),
        }
        # OCR service endpoints.
        self.ocr_url = conf.OCR_URL_FOLDER
        self.ocr_url_2 = conf.OCR2_URL_FOLDER
        # self.ocr_url_4 = conf.IC_URL
        self.ltgt_ocr_url = conf.LTGT_URL
        # Graceful shutdown on signal 15 (SIGTERM).
        signal.signal(signal.SIGTERM, self.signal_handler)

    def signal_handler(self, sig, frame):
        # Stop processing files; the while-self.switch loops then exit.
        self.switch = False

    def contract_process(self, ocr_data, contract_result, classify, rebuild_contract_result):
        """Normalize one contract page's OCR payload into two structures.

        - contract_result[classify][page_no]: list of row tuples suitable for
          BSWorkbook.contract_rebuild (sheet rendering).
        - rebuild_contract_result[classify][page_no]: flat field->text dict
          used later by all_res_add_contract for comparison output.

        A page_info entry's value is either None, a {'words': ...} leaf, or a
        dict of sub-fields each holding a {'words': ...} leaf.
        """
        contract_dict = ocr_data.get('data')
        if not contract_dict or contract_dict.get('page_num') is None or contract_dict.get('page_info') is None:
            return
        page_num = contract_dict.get('page_num')
        # 'page_3' -> '3'; anything else is used verbatim.
        if page_num.startswith('page_'):
            page_num_only = page_num.split('_')[-1]
        else:
            page_num_only = page_num
        rebuild_page_info = []
        text_key = 'words'
        for key, value in contract_dict.get('page_info', {}).items():
            if value is None:
                rebuild_page_info.append((key, ))
            elif text_key in value:
                if value[text_key] is None:
                    rebuild_page_info.append((key, ))
                elif isinstance(value[text_key], str):
                    rebuild_page_info.append((key, value[text_key]))
                elif isinstance(value[text_key], list):
                    # A list of rows: emit the key as a header row, then each row.
                    rebuild_page_info.append((key, ))
                    for row_list in value[text_key]:
                        rebuild_page_info.append(row_list)
            else:
                # Nested sub-fields: key as header, then one row per sub-field.
                rebuild_page_info.append((key, ))
                for sub_key, sub_value in value.items():
                    if sub_value is None:
                        rebuild_page_info.append((sub_key, ))
                    elif text_key in sub_value:
                        if sub_value[text_key] is None:
                            rebuild_page_info.append((sub_key, ))
                        elif isinstance(sub_value[text_key], str):
                            rebuild_page_info.append((sub_key, sub_value[text_key]))
                        elif isinstance(sub_value[text_key], list):
                            rebuild_page_info.append((sub_key, ))
                            for row_list in sub_value[text_key]:
                                rebuild_page_info.append(row_list)
        contract_result.setdefault(classify, dict()).setdefault(page_num_only, []).append(rebuild_page_info)

        # Second pass: flat comparison dict (text values only).
        page_compare_dict = {}
        for key, value in contract_dict.get('page_info', {}).items():
            if not isinstance(value, dict):
                continue
            elif text_key in value:
                if value[text_key] is None:
                    page_compare_dict[key] = ''
                elif isinstance(value[text_key], str):
                    page_compare_dict[key] = value[text_key]
                elif isinstance(value[text_key], list):
                    page_compare_dict[key] = value[text_key]
            else:
                page_compare_dict[key] = {}
                for sub_key, sub_value in value.items():
                    # NOTE(review): unlike the first pass, this indexes
                    # sub_value[text_key] without checking membership or
                    # None-ness of sub_value — a sub-field lacking 'words'
                    # would raise here; confirm the payload guarantees it.
                    if sub_value[text_key] is None:
                        page_compare_dict[key][sub_key] = ''
                    elif isinstance(sub_value[text_key], str):
                        page_compare_dict[key][sub_key] = sub_value[text_key]
        rebuild_contract_result.setdefault(classify, dict())[page_num_only] = page_compare_dict

    def license1_process(self, ocr_data, all_res, classify):
        """Normalize a type-1 license OCR result and append it to all_res[classify].

        Handles ID cards/residence permits (IC_CLASSIFY), vehicle registration
        certificates (MVC_CLASSIFY); CONTRACT_CLASSIFY is ignored here; any
        other classify has its raw list extended into all_res.
        """
        # '类别' (category): '0' = ID card, '1' = residence permit.
        license_data = ocr_data.get('data')
        if not license_data:
            return
        if isinstance(license_data, dict):
            # Drop the echoed image to keep results small.
            license_data.pop('base64_img', '')
        if classify == consts.IC_CLASSIFY:
            id_card_dict = {}
            card_type = license_data.get('type', '')
            is_ic = card_type.startswith('身份证')
            is_info_side = card_type.endswith('信息面')
            id_card_dict['类别'] = '0' if is_ic else '1'
            # Pick the field mapping by document kind and which side was scanned.
            if is_ic:
                field_map = consts.IC_MAP_0 if is_info_side else consts.IC_MAP_1
            else:
                field_map = consts.RP_MAP_0 if is_info_side else consts.RP_MAP_1
            for write_field, search_field in field_map:
                id_card_dict[write_field] = license_data.get('words_result', {}).get(search_field, {}).get('words', '')
            if not is_info_side:
                # Back side carries issue/expiry dates; combine into one range field.
                start_time = license_data.get('words_result', {}).get('签发日期', {}).get('words', '')
                end_time = license_data.get('words_result', {}).get('失效日期', {}).get('words', '')
                id_card_dict['有效期限'] = '{0}-{1}'.format(start_time, end_time)
            # for id_card_dict in license_data:
            #     try:
            #         id_card_dict.pop('base64_img')
            #     except Exception as e:
            #         continue
            all_res.setdefault(classify, []).append(id_card_dict)
        elif classify == consts.MVC_CLASSIFY:
            rebuild_data_dict = {}
            mvc_page = license_data.pop('page', 'VehicleRCI')
            mvc_res = license_data.pop('results', {})
            if mvc_page == 'VehicleRegArea':
                # Registration page: certificate number plus repeated
                # registration entries collected into per-key lists.
                rebuild_data_dict['机动车登记证书编号'] = mvc_res.get('机动车登记证书编号', {}).get('words', '')
                for register_info in mvc_res.get('登记信息', []):
                    register_info.pop('register_type', None)
                    register_info.pop('register_type_name', None)
                    for cn_key, detail_dict in register_info.items():
                        rebuild_data_dict.setdefault(cn_key, []).append(
                            detail_dict.get('words', ''))
            else:
                for cn_key, detail_dict in mvc_res.items():
                    rebuild_data_dict[cn_key] = detail_dict.get('words', '')
            all_res.setdefault(classify, []).append(rebuild_data_dict)
        elif classify == consts.CONTRACT_CLASSIFY:
            pass
        else:
            # all_res.extend(license_data)
            all_res.setdefault(classify, []).extend(license_data)

    def license2_process(self, ocr_data, all_res, classify, img_path):
        """Send an image section to the secondary OCR service and collect results.

        Uses the cropped section image from ocr_data if present, otherwise
        re-reads and base64-encodes img_path. Retries up to
        consts.RETRY_TIMES; appends parsed results into all_res (a list here,
        not the classify-keyed dict used by license1_process).
        """
        pid, _, _, _, _, _ = consts.LICENSE_CLASSIFY_MAPPING.get(classify)
        file_data = ocr_data.get('section_img')
        if file_data is None:
            with open(img_path, 'rb') as f:
                base64_data = base64.b64encode(f.read())
                # Decode to get the base64 string form.
                file_data = base64_data.decode()
        json_data_2 = {
            "pid": str(pid),
            "filedata": file_data
        }
        for times in range(consts.RETRY_TIMES):
            try:
                start_time = time.time()
                ocr_2_response = requests.post(self.ocr_url_2, data=json_data_2)
                if ocr_2_response.status_code != 200:
                    raise OCR2Exception('ocr_2 status code: {0}'.format(ocr_2_response.status_code))
            except Exception as e:
                self.folder_log.warn(
                    '{0} [ocr_2 failed] [times={1}] [img_path={2}] [error={3}]'.format(
                        self.log_base, times, img_path, traceback.format_exc()))
            else:
                ocr_res_2 = json.loads(ocr_2_response.text)
                end_time = time.time()
                speed_time = int(end_time - start_time)
                self.folder_log.info(
                    '{0} [ocr_2 success] [img={1}] [speed_time={2}]'.format(
                        self.log_base, img_path, speed_time))
                if ocr_res_2.get('ErrorCode') in consts.SUCCESS_CODE_SET:
                    if pid == consts.BC_PID:
                        # Bank cards: keep the raw response.
                        all_res.append(ocr_res_2)
                    else:
                        # Business licenses etc.: flatten FieldList into a dict.
                        for result_dict in ocr_res_2.get('ResultList', []):
                            res_dict = {}
                            for field_dict in result_dict.get('FieldList', []):
                                res_dict[field_dict.get('chn_key', '')] = field_dict.get('value', '')
                            all_res.append(res_dict)
                # Got a response (success or business error): stop retrying.
                break

    @staticmethod
    def parse_img_path(img_path):
        """Return (stem, page_no, img_no) — both 1-based — from an image path.

        Expects names produced as 'page_{pno}_img_{idx}.{ext}'; anything else
        yields (stem, 1, 1).
        """
        # 'page_{0}_img_{1}.{2}'.format(pno, img_index, ext)
        img_name, _ = os.path.splitext(os.path.basename(img_path))
        if re.match(r'page_\d+_img_\d+', img_name):
            part_list = img_name.split('_')
            return img_name, int(part_list[1])+1, int(part_list[3])+1
        else:
            return img_name, 1, 1

    @staticmethod
    def get_path(name, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir_map):
        """Build timestamped output paths for one input file.

        Returns (img_save_path, excel_path, pdf_save_path, seperate_path_map)
        where seperate_path_map mirrors seperate_dir_map with the timestamped
        name appended.  NOTE(review): the timestamp contains ':' which is
        invalid in Windows filenames — fine on POSIX only.
        """
        time_stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        new_name = '{0}_{1}'.format(time_stamp, name)
        img_save_path = os.path.join(img_output_dir, new_name)
        pdf_save_path = os.path.join(pdf_output_dir, new_name)
        excel_name = '{0}.xlsx'.format(os.path.splitext(new_name)[0])
        excel_path = os.path.join(wb_output_dir, excel_name)
        seperate_path_map = dict()
        if len(seperate_dir_map) > 0:
            for c, seperate_dir in seperate_dir_map.items():
                seperate_path_map[c] = os.path.join(seperate_dir, new_name)
        return img_save_path, excel_path, pdf_save_path, seperate_path_map

    @staticmethod
    def all_res_add_contract(all_res, rebuild_contract_result):
        """Fold per-page contract fields into a single result per classify.

        AFC_CON_MAP_LTGT maps each output key to (pno_not_asp, pno_asp, key1,
        key2): a page number per ASP/non-ASP layout, a field key and an
        optional sub-key. pno None with non-int pno_asp means 'collect key1
        from pages 1..7 (or 1..8 for ASP)'. NOTE(review): is_asp is always
        False here, so only the non-ASP page numbers are used.
        """
        for classify, page_info_dict in rebuild_contract_result.items():
            res = {}
            is_asp = False
            for key, (pno_not_asp, pno_asp, key1, key2) in consts.AFC_CON_MAP_LTGT.items():
                pno = pno_asp if is_asp else pno_not_asp
                if pno is None:
                    if isinstance(pno_asp, int):
                        # ASP-only field while not in ASP mode: skip.
                        continue
                    end_idx = 9 if is_asp else 8
                    for i in range(1, end_idx):
                        res.setdefault(key, list()).append(page_info_dict.get(str(i), {}).get(key1, ''))
                elif key2 is None:
                    res[key] = page_info_dict.get(str(pno), {}).get(key1, '')
                    res.setdefault(consts.IMG_PATH_KEY, dict())[key] = page_info_dict.get(str(pno), {}).get(
                        consts.IMG_PATH_KEY, '')
                else:
                    res[key] = page_info_dict.get(str(pno), {}).get(key1, {}).get(key2, '')
                    res.setdefault(consts.IMG_PATH_KEY, dict())[key] = page_info_dict.get(str(pno), {}).get(
                        consts.IMG_PATH_KEY, '')
            all_res[classify] = [res]

    def res_process(self, all_res, excel_path, classify, contract_result, rebuild_contract_result):
        """Write the per-file Excel workbook and merge contract results into all_res.

        For each classify in all_res, writes one sheet of (label, value) rows
        using field_map; side vs. source field order is chosen by presence of
        the key_field in the result. Errors are logged, never raised.
        """
        try:
            wb = BSWorkbook(set(), set(), set(), set(), set())
            for c, res_list in all_res.items():
                sheet_name, key_field, side_field_order, src_field_order = self.field_map.get(c)
                ws = wb.create_sheet(sheet_name)
                for res in res_list:
                    # key_field present => this is the 'side' layout variant.
                    if key_field is not None and key_field in res:
                        field_order = side_field_order
                    else:
                        field_order = src_field_order
                    for search_field, write_field in field_order:
                        field_value = res.get(search_field, '')
                        if isinstance(field_value, list):
                            ws.append((write_field, *field_value))
                        else:
                            ws.append((write_field, field_value))
                    # Blank row between records.
                    ws.append((None, ))
            wb.contract_rebuild(contract_result)
            wb.remove_base_sheet()
            wb.save(excel_path)
            self.all_res_add_contract(all_res, rebuild_contract_result)
        except Exception as e:
            self.folder_log.error('{0} [wb build error] [path={1}] [error={2}]'.format(
                self.log_base, excel_path, traceback.format_exc()))

    def basename(self, path):
        # A basename() variant which first strips the trailing slash, if present.
        # Thus we always get the last component of the path, even for directories.
        sep = os.path.sep + (os.path.altsep or '')
        return os.path.basename(path.rstrip(sep))

    def ocr_process(self, img_path, classify, all_res, seperate_path_map, contract_result, rebuild_contract_result):
        """Run primary OCR on one image and dispatch each detected document.

        Posts the base64 image to ocr_url (with a fixed classify hint unless a
        combined folder is being separated), retries on transport errors, then
        routes each returned document to license1/license2/contract handlers.
        Images whose detected class has a separation dir are moved there.
        """
        if os.path.exists(img_path):
            # TODO image validation
            with open(img_path, 'rb') as f:
                base64_data = base64.b64encode(f.read())
                # Decode to get the base64 string form.
                file_data = base64_data.decode()
            json_data = {
                "file": file_data,
                "channel": consts.AFC_PREFIX,
            }
            # No separation dirs => classify is fixed; tell the OCR service.
            if len(seperate_path_map) == 0:
                json_data["classify"] = classify
            for times in range(consts.RETRY_TIMES):
                try:
                    start_time = time.time()
                    ocr_response = requests.post(self.ocr_url, json=json_data)
                    if ocr_response.status_code != 200:
                        raise OCR1Exception('{0} ocr status code: {1}'.format(self.log_base, ocr_response.status_code))
                except Exception as e:
                    self.folder_log.warn('{0} [ocr failed] [times={1}] [img_path={2}] [error={3}]'.format(
                        self.log_base, times, img_path, traceback.format_exc()))
                else:
                    ocr_res = ocr_response.json()
                    end_time = time.time()
                    speed_time = int(end_time - start_time)
                    self.folder_log.info('{0} [ocr success] [img={1}] [speed_time={2}]'.format(
                        self.log_base, img_path, speed_time))
                    if isinstance(ocr_res, dict):
                        if ocr_res.get('code') == 1:
                            data_list = ocr_res.get('data', [])
                            if isinstance(data_list, list):
                                for ocr_data in data_list:
                                    new_classify = ocr_data.get('classify')
                                    # Only process classes we expect: the fixed
                                    # classify or one of the separation classes.
                                    if new_classify in seperate_path_map or new_classify == classify:
                                        if new_classify in seperate_path_map:
                                            seperate_dir = seperate_path_map[new_classify]
                                            os.makedirs(seperate_dir, exist_ok=True)
                                            real_dst = os.path.join(seperate_dir, self.basename(img_path))
                                            if not os.path.exists(real_dst):
                                                shutil.move(img_path, seperate_dir)
                                        # NOTE(review): if the image was just
                                        # moved above, license2_process may
                                        # re-open the old img_path (only when
                                        # 'section_img' is absent) — confirm
                                        # that combination cannot occur.
                                        if new_classify in consts.LICENSE_CLASSIFY_SET_1:
                                            self.license1_process(ocr_data, all_res, new_classify)
                                        elif new_classify in consts.LICENSE_CLASSIFY_SET_2:
                                            self.license2_process(ocr_data, all_res, new_classify, img_path)
                                        elif new_classify in consts.CONTRACT_SET:
                                            self.contract_process(ocr_data, contract_result, new_classify,
                                                                  rebuild_contract_result)
                    # Response received: stop retrying.
                    break
        else:
            self.folder_log.warn('{0} [ocr failed] [img_path={1}]'.format(self.log_base, img_path))

    def ltgt_ocr_process(self, img_path_list, label, path):
        """POST all page images of an LTGT document to the LTGT OCR service.

        Returns the parsed JSON response on success, or None when every retry
        failed (the for/else logs the terminal failure). Missing image files
        are silently skipped from the payload.
        """
        img_data_list = []
        for img_path in img_path_list:
            if os.path.exists(img_path):
                with open(img_path, 'rb') as f:
                    base64_data = base64.b64encode(f.read())
                    # Decode to get the base64 string form.
                    file_data = base64_data.decode()
                img_data_list.append(file_data)
        json_data = {
            "label": label,
            "img_data_list": img_data_list
        }
        for times in range(consts.RETRY_TIMES):
            try:
                start_time = time.time()
                ocr_response = requests.post(self.ltgt_ocr_url, json=json_data)
                if ocr_response.status_code != 200:
                    raise LTGTException('{0} ltgt ocr status code: {1}'.format(self.log_base, ocr_response.status_code))
            except Exception as e:
                self.folder_log.warn('{0} [ltgt ocr failed] [times={1}] [path={2}] [error={3}]'.format(
                    self.log_base, times, path, traceback.format_exc()))
            else:
                ocr_res = ocr_response.json()
                end_time = time.time()
                speed_time = int(end_time - start_time)
                self.folder_log.info('{0} [ltgt ocr success] [path={1}] [speed_time={2}]'.format(
                    self.log_base, path, speed_time))
                return ocr_res
        else:
            # for/else: all retries exhausted without a successful response.
            self.folder_log.warn('{0} [ltgt ocr failed] [path={1}]'.format(self.log_base, path))

    def ltgt_res_process(self, ocr_res, label, excel_path):
        """Build the LTGT Excel workbook from an OCR response.

        Returns the rebuilt result dict from BSWorkbook.ltgt_build, or None on
        any failure (logged, not raised) or when ocr_res is unusable.
        """
        try:
            if isinstance(ocr_res, dict):
                if ocr_res.get('code') == 1:
                    result_dict = ocr_res.get('data', {})
                    wb = BSWorkbook(set(), set(), set(), set(), set())
                    rebuild_res = wb.ltgt_build(label, result_dict, self.amount_key_set)
                    wb.remove_base_sheet()
                    wb.save(excel_path)
                    return rebuild_res
        except Exception as e:
            self.folder_log.error('{0} [wb build error] [path={1}] [error={2}]'.format(
                self.log_base, excel_path, traceback.format_exc()))

    def ltgt_process(self, img_path_list, label, excel_path, path):
        """OCR an LTGT document's pages and write its workbook; return the result dict."""
        ocr_res = self.ltgt_ocr_process(img_path_list, label, path)
        rebuild_res = self.ltgt_res_process(ocr_res, label, excel_path)
        return rebuild_res

    def images_process(self, img_path_list, classify, excel_path,
                       seperate_path_map):
        """OCR a list of images, write the per-file workbook, and return all results."""
        all_res = dict()
        contract_result = dict()
        rebuild_contract_result = dict()
        for img_path in img_path_list:
            self.ocr_process(img_path, classify, all_res, seperate_path_map, contract_result, rebuild_contract_result)
        # if len(all_res) > 0:
        self.res_process(all_res, excel_path, classify, contract_result, rebuild_contract_result)
        return all_res

    def pdf_process(self, name, path, classify, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir_map):
        """Process one PDF: extract pages, OCR, write workbook, archive the PDF.

        Three paths: e-contracts go through afc_predict on the PDF text layer;
        LTGT classes go through the LTGT page-image pipeline; everything else
        through the generic image pipeline. Extraction errors are re-raised so
        folder_process can move the file to 'failed'. Returns the result dict
        for the daily-workbook queue.
        """
        if os.path.exists(path):
            img_save_path, excel_path, pdf_save_path, seperate_path_map = self.get_path(
                name, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir_map)
            pdf_handler = PDFHandler(path, img_save_path)
            if classify == consts.CONTRACT_CLASSIFY:
                try:
                    self.folder_log.info('{0} [e-contract pdf to img start] [path={1}]'.format(self.log_base, path))
                    pdf_handler.e_contract_process()
                    self.folder_log.info('{0} [e-contract pdf to img end] [path={1}]'.format(self.log_base, path))
                except Exception as e:
                    self.folder_log.error('{0} [e-contract pdf to img error] [path={1}] [error={2}]'.format(
                        self.log_base, path, traceback.format_exc()))
                    raise e
                else:
                    ocr_result = afc_predict(pdf_handler.pdf_info)
                    contract_result = dict()
                    rebuild_contract_result = dict()
                    page_res = {}
                    all_res = dict()
                    for page_num, page_info in ocr_result.get('page_info', {}).items():
                        if isinstance(page_num, str) and page_num.startswith('page_'):
                            page_res[page_num] = {
                                'classify': classify,
                                "is_asp": ocr_result.get('is_asp', False),
                                'page_num': page_num,
                                'page_info': page_info
                            }
                        # Feed pages to contract_process in the order the
                        # images were extracted.
                    for _, page_key in pdf_handler.img_path_pno_list:
                        if page_key in page_res:
                            ocr_data = {
                                'classify': page_res[page_key].pop('classify', consts.OTHER_CLASSIFY),
                                'data': page_res[page_key]
                            }
                            self.contract_process(ocr_data, contract_result, classify, rebuild_contract_result)
                    self.res_process(all_res, excel_path, classify, contract_result, rebuild_contract_result)
                    shutil.move(path, pdf_save_path)
                    return all_res
            else:
                try:
                    self.folder_log.info('{0} [pdf to img start] [path={1}]'.format(self.log_base, path))
                    if classify in self.ltgt_classify_mapping:
                        pdf_handler.extract_page_image()
                    else:
                        pdf_handler.extract_image()
                    self.folder_log.info('{0} [pdf to img end] [path={1}]'.format(self.log_base, path))
                except Exception as e:
                    self.folder_log.error('{0} [pdf to img error] [path={1}] [error={2}]'.format(
                        self.log_base, path, traceback.format_exc()))
                    raise e
                else:
                    if classify in self.ltgt_classify_mapping:
                        ltgt_res = self.ltgt_process(pdf_handler.img_path_list, self.ltgt_classify_mapping[classify],
                                                     excel_path, path)
                        rebuild_res = {
                            classify: [ltgt_res]
                        }
                    else:
                        rebuild_res = self.images_process(pdf_handler.img_path_list, classify, excel_path,
                                                          seperate_path_map)
                    shutil.move(path, pdf_save_path)
                    return rebuild_res

    def tif_process(self, name, path, classify, img_output_dir, wb_output_dir, tiff_output_dir, seperate_dir_map):
        """Process one TIFF: split frames, OCR, write workbook, archive the TIFF.

        Mirrors pdf_process's non-contract branch; extraction errors are
        re-raised for folder_process to handle. Returns the result dict.
        """
        if os.path.exists(path):
            try:
                img_save_path, excel_path, tiff_save_path, seperate_path_map = self.get_path(
                    name, img_output_dir, wb_output_dir, tiff_output_dir, seperate_dir_map)
                self.folder_log.info('{0} [tiff to img start] [path={1}]'.format(self.log_base, path))
                tiff_handler = TIFFHandler(path, img_save_path)
                tiff_handler.extract_image()
                self.folder_log.info('{0} [tiff to img end] [path={1}]'.format(self.log_base, path))
            except Exception as e:
                self.folder_log.error('{0} [tiff to img error] [path={1}] [error={2}]'.format(
                    self.log_base, path, traceback.format_exc()))
                raise e
            else:
                if classify in self.ltgt_classify_mapping:
                    ltgt_res = self.ltgt_process(tiff_handler.img_path_list, self.ltgt_classify_mapping[classify],
                                                 excel_path, path)
                    rebuild_res = {
                        classify: [ltgt_res]
                    }
                else:
                    rebuild_res = self.images_process(tiff_handler.img_path_list, classify, excel_path,
                                                      seperate_path_map)
                shutil.move(path, tiff_save_path)
                return rebuild_res

    def img_process(self, name, path, classify, wb_output_dir, img_output_dir, pdf_output_dir, seperate_dir_map):
        """Process one standalone image file; returns the result dict.

        The existence re-check before the final move covers the case where
        ocr_process already moved the image into a separation folder.
        """
        try:
            img_save_path, excel_path, _, seperate_path_map = self.get_path(
                name, img_output_dir, wb_output_dir, pdf_output_dir, seperate_dir_map)
        except Exception as e:
            self.folder_log.error('{0} [get path error] [path={1}] [error={2}]'.format(
                self.log_base, path, traceback.format_exc()))
        else:
            if classify in self.ltgt_classify_mapping:
                ltgt_res = self.ltgt_process([path], self.ltgt_classify_mapping[classify], excel_path, path)
                rebuild_res = {
                    classify: [ltgt_res]
                }
            else:
                rebuild_res = self.images_process([path], classify, excel_path, seperate_path_map)
            if os.path.exists(path):
                shutil.move(path, img_save_path)
            return rebuild_res

    def wb_process(self, wb_dir, result_queue):
        """Aggregator loop: drain the result queue into a per-day workbook.

        Collects up to 100 results sharing one date, then appends rows to
        Output_<date>.xlsx (creating sheets with a header row on first use).
        NOTE(review): when a result with a different date is pulled, the break
        discards it rather than carrying it into the next batch — confirm this
        at-most-once-per-day loss at date rollover is acceptable.
        """
        while self.switch:
            result_list = []
            date_str = None
            for i in range(100):
                try:
                    result = result_queue.get(block=False)
                except Exception as e:
                    # Empty queue: back off briefly and keep trying this batch.
                    time.sleep(self.short_sleep_time)
                else:
                    if date_str is None:
                        date_str = result[self.DATE_KEY]
                        result_list.append(result)
                    elif result[self.DATE_KEY] == date_str:
                        result_list.append(result)
                    else:
                        break
            if date_str is None:
                # Nothing arrived for the whole batch window; sleep long.
                time.sleep(self.long_sleep_time)
                continue
            else:
                wb_name = self.daily_wb_name.format(date_str)
                wb_path = os.path.join(wb_dir, wb_name)
                if os.path.isfile(wb_path):
                    wb = load_workbook(wb_path)
                else:
                    wb = Workbook()
                for result in result_list:
                    try:
                        if result[self.CLASSIFY_KEY] in self.sheet_content:
                            # LTGT classes: fixed sheet name and header list.
                            sheet_name, head_fields = self.sheet_content[result[self.CLASSIFY_KEY]]
                            first_head_row = head_fields
                        else:
                            sheet_name, key_field, side_field_order, field_order = self.field_map_2[
                                result[self.CLASSIFY_KEY]]
                            # IC side-variant rows are skipped entirely.
                            if key_field is not None and key_field in result[self.RESULT_KEY] \
                                    and result[self.CLASSIFY_KEY] == consts.IC_CLASSIFY:
                                continue
                            # Header row uses the longer of the two orders;
                            # value lookup keys follow the variant present.
                            if key_field is not None and len(side_field_order) > len(field_order):
                                first_head_row = []
                                for _, b in side_field_order:
                                    first_head_row.append(b)
                            else:
                                first_head_row = []
                                for _, b in field_order:
                                    first_head_row.append(b)
                            if key_field is not None and key_field in result[self.RESULT_KEY]:
                                head_fields = []
                                for a, _ in side_field_order:
                                    head_fields.append(a)
                            else:
                                head_fields = []
                                for a, _ in field_order:
                                    head_fields.append(a)
                        row = [result[self.FILE_KEY]]
                        for field in head_fields:
                            ocr_str_or_list = result[self.RESULT_KEY].get(field, '')
                            if isinstance(ocr_str_or_list, list):
                                last_ocr_str = '、'.join(ocr_str_or_list)
                            else:
                                last_ocr_str = ocr_str_or_list
                            row.append(last_ocr_str)
                        if sheet_name in wb.sheetnames:
                            ws = wb.get_sheet_by_name(sheet_name)
                        else:
                            # New sheet: write the header (file-name column first).
                            ws = wb.create_sheet(sheet_name)
                            first_head_row.insert(0, '文件名')
                            ws.append(first_head_row)
                        ws.append(row)
                    except Exception as e:
                        self.folder_log.info('{0} [daily wb failed] [result={1}] [error={2}]'.format(
                            self.log_base, result, traceback.format_exc()))
                wb.save(wb_path)

    def folder_process(self, input_dir, classify, is_combined, result_queue):
        """Worker loop: watch input_dir and process every file dropped into it.

        Creates the Output/<image|excel|pdf|tiff|failed> tree (plus separation
        dirs when is_combined), then polls input_dir. Per file: dispatch by
        extension, push results onto result_queue, move failures to 'failed',
        and record an OfflineReport/AFCOfflineReport row in finally. Files
        hitting OSError are parked in os_error_filename_set and retried one at
        a time once the directory is otherwise drained.
        """
        while not os.path.isdir(input_dir):
            self.folder_log.info('{0} [input dir is not dir] [input_dir={1}]'.format(self.log_base, input_dir))
            if self.switch:
                time.sleep(self.sleep_time)
                continue
            else:
                return
        output_dir = os.path.join(os.path.dirname(input_dir), 'Output')
        img_output_dir = os.path.join(output_dir, 'image')
        wb_output_dir = os.path.join(output_dir, 'excel')
        pdf_output_dir = os.path.join(output_dir, 'pdf')
        tiff_output_dir = os.path.join(output_dir, 'tiff')
        failed_output_dir = os.path.join(output_dir, 'failed')
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(img_output_dir, exist_ok=True)
        os.makedirs(wb_output_dir, exist_ok=True)
        os.makedirs(pdf_output_dir, exist_ok=True)
        os.makedirs(tiff_output_dir, exist_ok=True)
        os.makedirs(failed_output_dir, exist_ok=True)
        if is_combined:
            seperate_dir_map = dict()
            for c in self.combined_map.keys():
                seperate_dir = os.path.join(output_dir, self.combined_map[c])
                os.makedirs(seperate_dir, exist_ok=True)
                seperate_dir_map[c] = seperate_dir
        else:
            seperate_dir_map = dict()
        os_error_filename_set = set()
        while self.switch:
            # if not os.path.isdir(input_dir):
            #     self.folder_log.info('{0} [input dir is not dir] [input_dir={1}]'.format(self.log_base, input_dir))
            #     time.sleep(self.sleep_time)
            #     continue
            # 1. Collect the pdf/image files currently in the input dir.
            list_dir = os.listdir(input_dir)
            if not list_dir and len(os_error_filename_set) == 0:
                self.folder_log.info('{0} [input dir empty] [input_dir={1}]'.format(self.log_base, input_dir))
                time.sleep(self.sleep_time)
                continue
            all_file_set = set(list_dir)
            true_file_set = all_file_set - os_error_filename_set
            if len(true_file_set) == 0 and len(os_error_filename_set) > 0:
                # Nothing new: retry one previously OS-errored file.
                true_file_set.add(os_error_filename_set.pop())
            for name in true_file_set:
                # Grace period so files still being copied in settle first.
                time.sleep(5)
                path = os.path.join(input_dir, name)
                is_success = True
                failure_reason = OfflineFailureReason.OS_ERROR.value
                start_time = time.time()
                try:
                    if not os.path.exists(path):
                        self.folder_log.info('{0} [path is not exists] [path={1}]'.format(self.log_base, path))
                        continue
                    elif os.path.isfile(path):
                        self.folder_log.info('{0} [file start] [path={1}]'.format(self.log_base, path))
                        if name.endswith('.pdf') or name.endswith('.PDF'):
                            result = self.pdf_process(name, path, classify, img_output_dir, wb_output_dir,
                                                      pdf_output_dir, seperate_dir_map)
                        elif name.endswith('.tif') or name.endswith('.TIF') or name.endswith('.tiff') or \
                                name.endswith('.TIFF'):
                            if classify == consts.CONTRACT_CLASSIFY:
                                raise LTGTException('e-contract must be pdf')
                            result = self.tif_process(name, path, classify, img_output_dir, wb_output_dir,
                                                      tiff_output_dir, seperate_dir_map)
                        else:
                            if classify == consts.CONTRACT_CLASSIFY:
                                raise LTGTException('e-contract must be pdf')
                            result = self.img_process(name, path, classify, wb_output_dir, img_output_dir,
                                                      pdf_output_dir, seperate_dir_map)
                        self.folder_log.info('{0} [file end] [path={1}]'.format(self.log_base, path))
                    else:
                        # Directories are not processed; park them under 'failed'.
                        result = None
                        self.folder_log.info('{0} [path is dir] [path={1}]'.format(self.log_base, path))
                        failed_path = os.path.join(failed_output_dir, '{0}_{1}'.format(time.time(), name))
                        shutil.move(path, failed_path)
                except OSError:
                    is_success = False
                    failure_reason = OfflineFailureReason.OS_ERROR.value
                    os_error_filename_set.add(name)
                    self.folder_log.error('{0} [os error] [path={1}] [error={2}]'.format(
                        self.log_base, path, traceback.format_exc()))
                except Exception as e:
                    is_success = False
                    failure_reason = OfflineFailureReason.PROCESS_ERROR.value
                    try:
                        self.folder_log.error('{0} [file error] [path={1}] [error={2}]'.format(self.log_base, path,
                                                                                              traceback.format_exc()))
                        failed_path = os.path.join(failed_output_dir, '{0}_{1}'.format(time.time(), name))
                        shutil.move(path, failed_path)
                    except Exception as e:
                        failure_reason = OfflineFailureReason.OS_ERROR.value
                        os_error_filename_set.add(name)
                        self.folder_log.error('{0} [file move error] [path={1}] [error={2}]'.format(
                            self.log_base, path, traceback.format_exc()))
                else:
                    # Success: push each per-classify result to the aggregator.
                    if isinstance(result, dict) and len(result) > 0:
                        date_str = time.strftime("%Y-%m-%d")
                        for c, res_list in result.items():
                            if c == consts.MVC_CLASSIFY:
                                # MVC results for one file are merged into one row.
                                rebuild_res = {}
                                for tmp_res in res_list:
                                    rebuild_res.update(tmp_res)
                                result_queue.put(
                                    {
                                        self.CLASSIFY_KEY: c,
                                        self.RESULT_KEY: rebuild_res,
                                        self.DATE_KEY: date_str,
                                        self.FILE_KEY: name,
                                    }
                                )
                            else:
                                for res in res_list:
                                    result_queue.put(
                                        {
                                            self.CLASSIFY_KEY: c,
                                            self.RESULT_KEY: res,
                                            self.DATE_KEY: date_str,
                                            self.FILE_KEY: name,
                                        }
                                    )
                finally:
                    # Always record the attempt, success or not.
                    end_time = time.time()
                    try:
                        report_table = OfflineReport if input_dir.find(consts.HIL_PREFIX) != -1 else AFCOfflineReport
                        report_table.objects.create(
                            input_folder=input_dir,
                            doc_type=classify,
                            file_name=name,
                            status=is_success,
                            failure_reason=failure_reason,
                            duration=int(end_time - start_time)
                        )
                    except Exception as e:
                        self.folder_log.error('{0} [db save failed] [path={1}] [error={2}]'.format(
                            self.log_base, path, traceback.format_exc()))

    def handle(self, *args, **kwargs):
        """Entry point: fork one folder worker per input dir plus the aggregator.

        Config keys look like 'LTGT_DIR_<classify>_<idx>_<combined>'; after
        namespace stripping, split('_') yields [classify, idx, combined-flag].
        Connections are closed first so each forked process reconnects to the
        DB on its own. NOTE(review): the local name wb_process shadows the
        wb_process method — works, but worth renaming.
        """
        db.close_old_connections()
        if len(self.input_dirs) == 0:
            return
        result_queue = Queue()
        process_list = []
        one_input_dir = None
        for classify_idx, input_dir in self.input_dirs.items():
            if one_input_dir is None:
                one_input_dir = input_dir
            classify = int(classify_idx.split('_')[0])
            is_combined = True if int(classify_idx.split('_')[2]) == 1 else False
            process = Process(target=self.folder_process, args=(input_dir, classify, is_combined, result_queue))
            process_list.append(process)

        # The daily workbook lives two levels above any input dir.
        wb_dir = os.path.dirname(os.path.dirname(one_input_dir))
        wb_process = Process(target=self.wb_process, args=(wb_dir, result_queue, ))
        process_list.append(wb_process)

        for p in process_list:
            p.start()
        for p in process_list:
            p.join()

        self.folder_log.info('{0} [stop safely]'.format(self.log_base))