update code
Showing 37 changed files with 265 additions and 246 deletions
.idea/.gitignore
0 → 100644
.idea/inspectionProfiles/Project_Default.xml
0 → 100644
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="2">
            <item index="0" class="java.lang.String" itemvalue="psutil" />
            <item index="1" class="java.lang.String" itemvalue="thop" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>
\ No newline at end of file
.idea/misc.xml
0 → 100644
.idea/modules.xml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/tamper_det.iml" filepath="$PROJECT_DIR$/.idea/tamper_det.iml" />
    </modules>
  </component>
</project>
\ No newline at end of file
.idea/tamper_det.iml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="Python 3.6 (workenv)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>
\ No newline at end of file
.idea/vcs.xml
0 → 100644
README.md
deleted
100644 → 0
classify/predict.py
deleted
100644 → 0
classify/train.py
deleted
100644 → 0
classify/val.py
deleted
100644 → 0
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 classification model on a classification dataset

Usage:
    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet

Usage - formats:
    $ python classify/val.py --weights yolov5s-cls.pt                 # PyTorch
                                       yolov5s-cls.torchscript        # TorchScript
                                       yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                       yolov5s-cls.xml                # OpenVINO
                                       yolov5s-cls.engine             # TensorRT
                                       yolov5s-cls.mlmodel            # CoreML (macOS-only)
                                       yolov5s-cls_saved_model        # TensorFlow SavedModel
                                       yolov5s-cls.pb                 # TensorFlow GraphDef
                                       yolov5s-cls.tflite             # TensorFlow Lite
                                       yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU
                                       yolov5s-cls_paddle_model       # PaddlePaddle
"""

import argparse
import os
import sys
from pathlib import Path

import torch
from tqdm import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.dataloaders import create_classification_dataloader
from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args
from utils.torch_utils import select_device, smart_inference_mode


@smart_inference_mode()
def run(
        data=ROOT / '../datasets/mnist',  # dataset dir
        weights=ROOT / 'yolov5s-cls.pt',  # model.pt path(s)
        batch_size=128,  # batch size
        imgsz=224,  # inference size (pixels)
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        verbose=False,  # verbose output
        project=ROOT / 'runs/val-cls',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        criterion=None,
        pbar=None,
):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        save_dir.mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Dataloader
        data = Path(data)
        test_dir = data / 'test' if (data / 'test').exists() else data / 'val'  # data/test or data/val
        dataloader = create_classification_dataloader(path=test_dir,
                                                      imgsz=imgsz,
                                                      batch_size=batch_size,
                                                      augment=False,
                                                      rank=-1,
                                                      workers=workers)

    model.eval()
    pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
    n = len(dataloader)  # number of batches
    action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
    desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
    bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0)
    with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
        for images, labels in bar:
            with dt[0]:
                images, labels = images.to(device, non_blocking=True), labels.to(device)

            with dt[1]:
                y = model(images)

            with dt[2]:
                pred.append(y.argsort(1, descending=True)[:, :5])
                targets.append(labels)
                if criterion:
                    loss += criterion(y, labels)

    loss /= n
    pred, targets = torch.cat(pred), torch.cat(targets)
    correct = (targets[:, None] == pred).float()
    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
    top1, top5 = acc.mean(0).tolist()

    if pbar:
        pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}"
    if verbose:  # all classes
        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
        for i, c in model.names.items():
            aci = acc[targets == i]
            top1i, top5i = aci.mean(0).tolist()
            LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}")

        # Print results
        t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt)  # speeds per image
        shape = (1, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")

    return top1, top5, loss


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
    parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(exclude=('tensorboard', 'thop'))
    run(**vars(opt))


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
@@ -215,11 +215,11 @@ def run(
 
 def parse_opt():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
-    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/exp/weights/best.pt', help='model path or triton URL')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images/crop_img', help='file/dir/URL/glob/screen/0(webcam)')
     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
-    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+    parser.add_argument('--conf-thres', type=float, default=0.3, help='confidence threshold')
     parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
     parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
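The new defaults above point detection at the project's fine-tuned weights and at the cropped name/number images, with a slightly higher confidence threshold. Assuming this hunk belongs to the repository's detect.py (the file header is not shown in this view), the equivalent explicit invocation would look like:

$ python detect.py --weights runs/train/exp/weights/best.pt --source data/images/crop_img --conf-thres 0.3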
@@ -17,7 +17,8 @@ def iou(box, boxes):
     inner = np.maximum(0, (xx2 - xx1) * (yy2 - yy1))
     return inner / (area1 + areas - inner)
 
-def get_evaluate_score(true_image_path,true_label_path,predict_label_path,threshold):
+
+def get_evaluate_score(true_image_path, true_label_path, predict_label_path, threshold):
     true_labels = os.listdir(true_label_path)
     predict_labels = os.listdir(predict_label_path)
     targets, predicts = [], []
@@ -38,7 +39,7 @@ def get_evaluate_score(true_image_path,true_label_path,predict_label_path,thresh
             predicts.append(0)
         else:
             tmp = 0
-            predict_label = open(os.path.join(predict_label_path,label)).readlines()
+            predict_label = open(os.path.join(predict_label_path, label)).readlines()
             boxes = []
             for pl in predict_label:
                 cls, x1, y1, w1, h1 = [float(i) for i in pl.strip().split(' ')]
@@ -50,23 +51,28 @@ def get_evaluate_score(true_image_path,true_label_path,predict_label_path,thresh
                 x1, y1, w1, h1 = int(x1 * w), int(y1 * h), int(w1 * w), int(h1 * h)
                 xx1, yy1, xx2, yy2 = x1 - w1 // 2, y1 - h1 // 2, x1 + w1 // 2, y1 + h1 // 2
                 box1 = [xx1, yy1, xx2, yy2]
-                inner_score = iou(np.array(box1),np.array(boxes))
-                if max(inner_score)>threshold:
-                    tmp=1
+                inner_score = iou(np.array(box1), np.array(boxes))
+                if max(inner_score) > threshold:
+                    tmp = 1
                     predicts.append(1)
                     break
-            if tmp==0:
+            if tmp == 0:
                 predicts.append(0)
-    p = precision_score(targets,predicts)
-    r = recall_score(targets,predicts)
-    conf = confusion_matrix(targets,predicts)
-    print('precison:',p)
-    print('recall:',r)
+    p = precision_score(targets, predicts)
+    r = recall_score(targets, predicts)
+    conf = confusion_matrix(targets, predicts)
+    print('precison:', p)
+    print('recall:', r)
     print(conf)
-
+    print(f' 预 测 ')
+    print(f' authentic tampered ')
+    print(f'真 authentic \t\t{conf[0, 0]} \t\t{conf[0,1]}')
+    print(f'实 tempered \t\t{conf[1, 0]} \t\t\t{conf[1,1]}')
+    print(f'authentic precision:{conf[0,0]/(conf[0,0]+conf[1,0])}\trecall:{conf[0, 0]/(conf[0, 0]+conf[0, 1])}')
+    print(f'tampered precision:{conf[1, 1]/(conf[0, 1]+conf[1, 1])}\trecall:{conf[1, 1]/(conf[1, 0]+conf[1, 1])}')
 if __name__ == '__main__':
-    true_image_path = '/home/qfs/WorkSpace/ps_tamper/yolov5_ps/val_data/minsheng/images'
-    true_label_path = '/home/qfs/WorkSpace/ps_tamper/yolov5_ps/val_data/minsheng/labels'
-    predict_label_path = '/home/qfs/WorkSpace/ps_tamper/yolov5/runs/detect/exp/labels'
+    true_image_path = '/data/situ_invoice_bill_data/qfs_train_val_data/gongshang/images/val'
+    true_label_path = '/data/situ_invoice_bill_data/qfs_train_val_data/gongshang/labels/val'
+    predict_label_path = '/home/situ/qfs/invoice_tamper/09_project/project/tamper_det/runs/detect/exp4/labels'
     threshold = 0.1
-    get_evaluate_score(true_image_path,true_label_path,predict_label_path,threshold)
\ No newline at end of file
+    get_evaluate_score(true_image_path, true_label_path, predict_label_path, threshold)
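The added print block reports the 2×2 confusion matrix together with per-class precision and recall derived from its entries. A minimal, self-contained sketch of that bookkeeping, using sklearn's convention of rows = true class and columns = predicted class, with hypothetical labels:

from sklearn.metrics import confusion_matrix

targets = [0, 0, 0, 1, 1, 1, 1]   # 0 = authentic, 1 = tampered (hypothetical ground truth)
predicts = [0, 0, 1, 1, 1, 1, 0]  # hypothetical predictions
conf = confusion_matrix(targets, predicts)  # rows: true class, columns: predicted class

authentic_precision = conf[0, 0] / (conf[0, 0] + conf[1, 0])  # correct authentic / predicted authentic
authentic_recall = conf[0, 0] / (conf[0, 0] + conf[0, 1])     # correct authentic / actual authentic
tampered_precision = conf[1, 1] / (conf[0, 1] + conf[1, 1])   # correct tampered / predicted tampered
tampered_recall = conf[1, 1] / (conf[1, 0] + conf[1, 1])      # correct tampered / actual tampered
print(conf)
print(authentic_precision, authentic_recall, tampered_precision, tampered_recall)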
inference.py
0 → 100644
import copy
import os
import sys
from pathlib import Path
import numpy as np
import torch

from utils.augmentations import letterbox

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
from models.common import DetectMultiBackend
from utils.general import (check_img_size, cv2, non_max_suppression, scale_boxes)
from utils.torch_utils import select_device, smart_inference_mode
from models.yolov5_config import config

classes = ['tampered']


def gen_result_dict(boxes, label_list=[], std=False):
    result = {
        "error_code": 1,
        "result": []
    }
    rs_box = {
        "class": '',
        "score": 0,
        "left": 0,
        "top": 0,
        "width": 0,
        "height": 0
    }

    if not label_list:
        label_list = classes

    for box in boxes:
        result['error_code'] = 0
        box_dict = copy.deepcopy(rs_box)
        if std:
            box_dict['class'] = str(int(box[-1]))
        else:
            box_dict['class'] = label_list[int(box[-1])]

        box_dict['left'] = int(round(box[0], 0))
        box_dict['top'] = int(round(box[1], 0))
        box_dict['width'] = int(round(box[2], 0) - round(box[0], 0))
        box_dict['height'] = int(round(box[3], 0) - (round(box[1], 0)))
        box_dict['score'] = box[-2]
        result['result'].append(box_dict)
    return result


class Yolov5:
    def __init__(self, cfg=None):
        self.cfg = cfg
        self.device = select_device(self.cfg.device)
        self.model = DetectMultiBackend(self.cfg.weights, device=self.device, dnn=False, data=self.cfg.data, fp16=False)

    def detect(self, image):
        image0 = image.copy()
        stride, names, pt = self.model.stride, self.model.names, self.model.pt
        imgsz = check_img_size(self.cfg.imgsz, s=stride)  # check image size
        # Dataloader
        bs = 1  # batch_size
        im = letterbox(image, imgsz, stride=stride, auto=True)[0]  # padded resize
        im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        im = np.ascontiguousarray(im)  # contiguous
        # Run inference
        self.model.warmup(imgsz=(1 if pt or self.model.triton else bs, 3, *imgsz))  # warmup
        im = torch.from_numpy(im).to(self.model.device)
        im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        # Inference
        pred = self.model(im, augment=False, visualize=False)
        # NMS
        pred = non_max_suppression(pred, self.cfg.conf_thres, self.cfg.iou_thres, None, False, max_det=self.cfg.max_det)

        det = pred[0]
        # if len(det):
        det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], image0.shape).round()
        result = gen_result_dict(det.cpu().numpy().tolist())
        return result

    def plot(self, image, boxes):
        for box in boxes:
            cv2.rectangle(image, (box[0], box[1], box[2], box[3]), (0, 0, 255), 2)
        return image


if __name__ == "__main__":
    img = cv2.imread(
        '/home/situ/qfs/invoice_tamper/09_project/project/yolov5_inference/data/images/crop_img/_1594890230.8032346page_10_img_0_hname.jpg')
    detector = Yolov5(config)
    result = detector.detect(img)
    print(result)
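detect() returns the dictionary assembled by gen_result_dict, with box coordinates already scaled back to the original image. A hypothetical example of that structure for a single detection (all numbers illustrative):

example_result = {
    'error_code': 0,           # set to 0 as soon as at least one box is found, otherwise stays 1
    'result': [{
        'class': 'tampered',   # label_list[int(box[-1])]
        'score': 0.87,         # box[-2], detection confidence
        'left': 120,           # x1 in original-image pixels
        'top': 45,             # y1
        'width': 60,           # x2 - x1
        'height': 18,          # y2 - y1
    }]
}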
models/__pycache__/__init__.cpython-36.pyc
0 → 100644
models/__pycache__/common.cpython-36.pyc
0 → 100644
models/__pycache__/yolo.cpython-36.pyc
0 → 100644
models/yolov5_config.py
0 → 100644
from easydict import EasyDict as edict

config = edict(
    weights='runs/train/exp/weights/best.pt',  # model path or triton URL
    data='data/VOC.yaml',  # dataset.yaml path
    imgsz=(640, 640),  # inference size (height, width)
    conf_thres=0.5,  # confidence threshold
    iou_thres=0.45,  # NMS IOU threshold
    max_det=1000,  # maximum detections per image
    device=''  # cuda device, i.e. 0 or 0,1,2,3 or cpu
)
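This EasyDict is the cfg object that inference.py passes to Yolov5(config). A minimal usage sketch, assuming the default field values above and the attribute-style overrides EasyDict supports (the input path is hypothetical):

import cv2

from inference import Yolov5
from models.yolov5_config import config

config.conf_thres = 0.4                                   # override a field before constructing the detector
detector = Yolov5(config)
image = cv2.imread('data/images/crop_img/example.jpg')    # hypothetical input image
print(detector.detect(image))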
pipeline.py
0 → 100644
File mode changed
plot_sourece_labels.py
0 → 100644
import os

import cv2
import numpy as np
import pandas as pd
import tqdm


def get_source_image_det(crop_position, predict_positions):
    result = []
    x1, y1, x2, y2 = crop_position
    for p in predict_positions:
        px1, py1, px2, py2, score = p
        w, h = px2 - px1, py2 - py1
        result.append([x1 + px1, y1 + py1, x1 + px1 + w, y1 + py1 + h, score])
    return result


def decode_label(image, label_path):
    data = open(label_path).readlines()
    h, w, c = image.shape
    result = []
    for d in data:
        d = [float(i) for i in d.strip().split(' ')]
        cls, cx, cy, cw, ch, score = d
        cx, cy, cw, ch = cx * w, cy * h, cw * w, ch * h
        result.append([int(cx - cw // 2), int(cy - ch // 2), int(cx + cw // 2), int(cy + ch // 2), score])
    return result


if __name__ == '__main__':
    source_image_path = '/data/situ_invoice_bill_data/new_data/qfs_bank_bill_data/gongshang/authentic/images/val'
    val_image_path = '/home/situ/qfs/invoice_tamper/09_project/project/tamper_det/data/images/crop_img'
    predict_label_path = '/home/situ/qfs/invoice_tamper/09_project/project/tamper_det/runs/detect/exp/labels'
    crop_csv_path = '/data/situ_invoice_bill_data/new_data/qfs_bank_bill_data/gongshang/croped_merge.csv'
    predict_labels = os.listdir(predict_label_path)
    source_images = os.listdir(source_image_path)
    data = pd.read_csv(crop_csv_path)
    img_name = data.loc[:, 'img_name'].tolist()
    crop_position1 = data.loc[:, 'name_crop_coord'].tolist()
    crop_position2 = data.loc[:, 'number_crop_coord'].tolist()
    cc = '/data/situ_invoice_bill_data/new_data/qfs_bank_bill_data/gongshang/tampered/images/val/ps3'
    for im in os.listdir(cc):
        print(im)
        img = cv2.imread(os.path.join(cc, im))
        img_ = img.copy()
        id = img_name.index(im)
        name_crop_position = [int(i) for i in crop_position1[id].split(',')]
        number_crop_position = [int(i) for i in crop_position2[id].split(',')]
        nx1, ny1, nx2, ny2 = name_crop_position
        nux1, nuy1, nux2, nuy2 = number_crop_position
        if im[:-4] + '_hname.txt' in predict_labels:
            h, w, c = img[ny1:ny2, nx1:nx2, :].shape
            data = open(os.path.join(predict_label_path, im[:-4] + '_hname.txt')).readlines()
            for d in data:
                cls, cx, cy, cw, ch, score = [float(i) for i in d.strip().split(' ')]
                cx, cy, cw, ch = int(cx * w), int(cy * h), int(cw * w), int(ch * h)
                cx1, cy1 = cx - cw // 2, cy - ch // 2
                x1, y1, x2, y2 = nx1 + cx1, ny1 + cy1, nx1 + cx1 + cw, ny1 + cy1 + ch
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
                cv2.putText(img, f'tampered:{score}', (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        if im[:-4] + '_hnumber.txt' in predict_labels:
            h, w, c = img[nuy1:nuy2, nux1:nux2, :].shape
            data = open(os.path.join(predict_label_path, im[:-4] + '_hnumber.txt')).readlines()  # predictions for the number crop
            for d in data:
                cls, cx, cy, cw, ch, score = [float(i) for i in d.strip().split(' ')]
                cx, cy, cw, ch = int(cx * w), int(cy * h), int(cw * w), int(ch * h)
                cx1, cy1 = cx - cw // 2, cy - ch // 2
                x1, y1, x2, y2 = nux1 + cx1, nuy1 + cy1, nux1 + cx1 + cw, nuy1 + cy1 + ch
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
                cv2.putText(img, f'tampered:{score}', (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        result = np.vstack((img_, img))
        cv2.imwrite(f'z/{im}', result)
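The drawing loop above maps each predicted label from the name/number crop back onto the source invoice image: the normalized YOLO box is scaled to crop pixels and then offset by the crop's top-left corner. A self-contained sketch of that transform (function name and sample values are hypothetical):

def crop_box_to_source(cx, cy, cw, ch, crop_w, crop_h, ox, oy):
    # (cx, cy, cw, ch) are normalized YOLO box values predicted on the crop;
    # (crop_w, crop_h) is the crop size and (ox, oy) its top-left corner in the source image.
    bw, bh = int(cw * crop_w), int(ch * crop_h)                        # box size in crop pixels
    bx1, by1 = int(cx * crop_w) - bw // 2, int(cy * crop_h) - bh // 2  # top-left inside the crop
    return ox + bx1, oy + by1, ox + bx1 + bw, oy + by1 + bh            # box in source-image pixels


print(crop_box_to_source(0.5, 0.5, 0.2, 0.1, crop_w=400, crop_h=100, ox=830, oy=210))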
segment/predict.py
deleted
100644 → 0
segment/train.py
deleted
100644 → 0
segment/val.py
deleted
100644 → 0
setup.cfg
deleted
100644 → 0
# Project-wide configuration file, can be used for package metadata and other tool configurations
# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments
# Local usage: pip install pre-commit, pre-commit run --all-files

[metadata]
license_file = LICENSE
description_file = README.md


[tool:pytest]
norecursedirs =
    .git
    dist
    build
addopts =
    --doctest-modules
    --durations=25
    --color=yes


[flake8]
max-line-length = 120
exclude = .tox,*.egg,build,temp
select = E,W,F
doctests = True
verbose = 2
# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
format = pylint
# see: https://www.flake8rules.com/
ignore =
    E731  # Do not assign a lambda expression, use a def
    F405  # name may be undefined, or defined from star imports: module
    E402  # module level import not at top of file
    F401  # module imported but unused
    W504  # line break after binary operator
    E127  # continuation line over-indented for visual indent
    E231  # missing whitespace after ',', ';', or ':'
    E501  # line too long
    F403  # 'from module import *' used; unable to detect undefined names


[isort]
# https://pycqa.github.io/isort/docs/configuration/options.html
line_length = 120
# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html
multi_line_output = 0


[yapf]
based_on_style = pep8
spaces_before_comment = 2
COLUMN_LIMIT = 120
COALESCE_BRACKETS = True
SPACES_AROUND_POWER_OPERATOR = True
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False
SPLIT_BEFORE_CLOSING_BRACKET = False
SPLIT_BEFORE_FIRST_ARGUMENT = False
# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False
utils/__pycache__/__init__.cpython-36.pyc
0 → 100644
utils/__pycache__/autoanchor.cpython-36.pyc
0 → 100644
utils/__pycache__/dataloaders.cpython-36.pyc
0 → 100644
utils/__pycache__/downloads.cpython-36.pyc
0 → 100644
utils/__pycache__/general.cpython-36.pyc
0 → 100644
utils/__pycache__/metrics.cpython-36.pyc
0 → 100644
utils/__pycache__/plots.cpython-36.pyc
0 → 100644
utils/__pycache__/torch_utils.cpython-36.pyc
0 → 100644