submit code
0 parents
Showing 129 changed files with 4900 additions and 0 deletions
.gitignore
0 → 100644
.idea/.gitignore
0 → 100644
.idea/inspectionProfiles/Project_Default.xml
0 → 100644
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="2">
            <item index="0" class="java.lang.String" itemvalue="psutil" />
            <item index="1" class="java.lang.String" itemvalue="thop" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>
\ No newline at end of file
.idea/misc.xml
0 → 100644
.idea/modules.xml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/tamper_det.iml" filepath="$PROJECT_DIR$/.idea/tamper_det.iml" />
    </modules>
  </component>
</project>
\ No newline at end of file
.idea/tamper_det.iml
0 → 100644
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="Python 3.6 (workenv)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="GOOGLE" />
    <option name="myDocStringFormat" value="Google" />
  </component>
</module>
\ No newline at end of file
.idea/vcs.xml
0 → 100644
README.md
0 → 100644
bank_ocr_inference.py
0 → 100644
benchmarks.py
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv5 benchmarks on all supported export formats

Format | `export.py --include` | Model
--- | --- | ---
PyTorch | - | yolov5s.pt
TorchScript | `torchscript` | yolov5s.torchscript
ONNX | `onnx` | yolov5s.onnx
OpenVINO | `openvino` | yolov5s_openvino_model/
TensorRT | `engine` | yolov5s.engine
CoreML | `coreml` | yolov5s.mlmodel
TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
TensorFlow GraphDef | `pb` | yolov5s.pb
TensorFlow Lite | `tflite` | yolov5s.tflite
TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
TensorFlow.js | `tfjs` | yolov5s_web_model/

Requirements:
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT

Usage:
    $ python benchmarks.py --weights yolov5s.pt --img 640
"""

import argparse
import platform
import sys
import time
from pathlib import Path

import pandas as pd

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd())  # relative

import export
from models.experimental import attempt_load
from models.yolo import SegmentationModel
from segment.val import run as val_seg
from utils import notebook_init
from utils.general import LOGGER, check_yaml, file_size, print_args
from utils.torch_utils import select_device
from val import run as val_det


def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    y, t = [], time.time()
    device = select_device(device)
    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
        try:
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if f == '-':
                w = weights  # PyTorch format
            else:
                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'

            # Validate
            if model_type == SegmentationModel:
                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
            else:  # DetectionModel:
                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
            speed = result[2][1]  # times (preprocess, inference, postprocess)
            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
            y.append([name, None, None, None])  # mAP, t_inference
        if pt_only and i == 0:
            break  # break after PyTorch

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    # note: `map` below is the always-truthy builtin, so the metric columns are always selected
    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
    py = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py if map else py.iloc[:, :2]))
    if hard_fail and isinstance(hard_fail, str):
        metrics = py['mAP50-95'].array  # values to compare to floor
        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
    return py


def test(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
        hard_fail=False,  # throw error on benchmark failure
):
    y, t = [], time.time()
    device = select_device(device)
    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # (name, file, suffix, CPU, GPU), matching run() above
        try:
            w = weights if f == '-' else \
                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
            assert suffix in str(w), 'export failed'
            y.append([name, True])
        except Exception:
            y.append([name, False])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'Export'])
    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--test', action='store_true', help='test exports only')
    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    print_args(vars(opt))
    return opt


def main(opt):
    test(**vars(opt)) if opt.test else run(**vars(opt))


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
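The same entry point can also be driven from Python. A minimal sketch (my own example, not part of the diff), assuming it is run from the repo root so the relative imports in benchmarks.py resolve; pt_only=True stops after the native PyTorch row:

from benchmarks import run

df = run(weights='yolov5s.pt', imgsz=640, device='cpu', pt_only=True)  # returns a pandas DataFrame
print(df.to_string(index=False))  # columns: Format, Size (MB), mAP50-95, Inference time (ms)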
data/Argoverse.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
# Example usage: python train.py --data Argoverse.yaml
# parent
# ├── yolov5
# └── datasets
#     └── Argoverse ← downloads here (31.3 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Argoverse  # dataset root dir
train: Argoverse-1.1/images/train/  # train images (relative to 'path') 39384 images
val: Argoverse-1.1/images/val/  # val images (relative to 'path') 15062 images
test: Argoverse-1.1/images/test/  # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: bus
  5: truck
  6: traffic_light
  7: stop_sign


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import json

  from tqdm import tqdm
  from utils.general import download, Path


  def argoverse2yolo(set):
      labels = {}
      a = json.load(open(set, "rb"))
      for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
          img_id = annot['image_id']
          img_name = a['images'][img_id]['name']
          img_label_name = f'{img_name[:-3]}txt'

          cls = annot['category_id']  # instance class id
          x_center, y_center, width, height = annot['bbox']
          x_center = (x_center + width / 2) / 1920.0  # offset and scale
          y_center = (y_center + height / 2) / 1200.0  # offset and scale
          width /= 1920.0  # scale
          height /= 1200.0  # scale

          img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
          if not img_dir.exists():
              img_dir.mkdir(parents=True, exist_ok=True)

          k = str(img_dir / img_label_name)
          if k not in labels:
              labels[k] = []
          labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")

      for k in labels:
          with open(k, "w") as f:
              f.writelines(labels[k])


  # Download
  dir = Path('../datasets/Argoverse')  # dataset root dir
  urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
  download(urls, dir=dir, delete=False)

  # Convert
  annotations_dir = 'Argoverse-HD/annotations/'
  (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images')  # rename 'tracking' to 'images'
  for d in "train.json", "val.json":
      argoverse2yolo(dir / annotations_dir / d)  # convert Argoverse annotations to YOLO labels
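The conversion above maps a COCO-style top-left (x, y, w, h) box to YOLO's normalized center format, with the 1920×1200 Argoverse-HD frame size hard-coded. A standalone sketch of the same arithmetic (the sample box is made up for illustration):

def coco_xywh_to_yolo(x, y, w, h, img_w=1920.0, img_h=1200.0):
    # top-left (x, y, w, h) in pixels -> normalized (x_center, y_center, w, h)
    return (x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h

# Example: a 100x50 box with top-left corner at (860, 575)
print(coco_xywh_to_yolo(860, 575, 100, 50))  # -> (0.47395..., 0.5, 0.05208..., 0.04166...)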
data/GlobalWheat2020.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
# Example usage: python train.py --data GlobalWheat2020.yaml
# parent
# ├── yolov5
# └── datasets
#     └── GlobalWheat2020 ← downloads here (7.0 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/GlobalWheat2020  # dataset root dir
train: # train images (relative to 'path') 3422 images
  - images/arvalis_1
  - images/arvalis_2
  - images/arvalis_3
  - images/ethz_1
  - images/rres_1
  - images/inrae_1
  - images/usask_1
val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
  - images/ethz_1
test: # test images (optional) 1276 images
  - images/utokyo_1
  - images/utokyo_2
  - images/nau_1
  - images/uq_1

# Classes
names:
  0: wheat_head


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from utils.general import download, Path


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
  download(urls, dir=dir)

  # Make Directories
  for p in 'annotations', 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)

  # Move
  for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
           'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
      (dir / p).rename(dir / 'images' / p)  # move to /images
      f = (dir / p).with_suffix('.json')  # json file
      if f.exists():
          f.rename((dir / 'annotations' / p).with_suffix('.json'))  # move to /annotations
data/ImageNet.yaml
0 → 100644
data/Objects365.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Objects365 dataset https://www.objects365.org/ by Megvii
# Example usage: python train.py --data Objects365.yaml
# parent
# ├── yolov5
# └── datasets
#     └── Objects365 ← downloads here (712 GB = 367G data + 345G zips)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Objects365  # dataset root dir
train: images/train  # train images (relative to 'path') 1742289 images
val: images/val  # val images (relative to 'path') 80000 images
test: # test images (optional)

# Classes
names:
  0: Person
  1: Sneakers
  2: Chair
  3: Other Shoes
  4: Hat
  5: Car
  6: Lamp
  7: Glasses
  8: Bottle
  9: Desk
  10: Cup
  11: Street Lights
  12: Cabinet/shelf
  13: Handbag/Satchel
  14: Bracelet
  15: Plate
  16: Picture/Frame
  17: Helmet
  18: Book
  19: Gloves
  20: Storage box
  21: Boat
  22: Leather Shoes
  23: Flower
  24: Bench
  25: Potted Plant
  26: Bowl/Basin
  27: Flag
  28: Pillow
  29: Boots
  30: Vase
  31: Microphone
  32: Necklace
  33: Ring
  34: SUV
  35: Wine Glass
  36: Belt
  37: Monitor/TV
  38: Backpack
  39: Umbrella
  40: Traffic Light
  41: Speaker
  42: Watch
  43: Tie
  44: Trash bin Can
  45: Slippers
  46: Bicycle
  47: Stool
  48: Barrel/bucket
  49: Van
  50: Couch
  51: Sandals
  52: Basket
  53: Drum
  54: Pen/Pencil
  55: Bus
  56: Wild Bird
  57: High Heels
  58: Motorcycle
  59: Guitar
  60: Carpet
  61: Cell Phone
  62: Bread
  63: Camera
  64: Canned
  65: Truck
  66: Traffic cone
  67: Cymbal
  68: Lifesaver
  69: Towel
  70: Stuffed Toy
  71: Candle
  72: Sailboat
  73: Laptop
  74: Awning
  75: Bed
  76: Faucet
  77: Tent
  78: Horse
  79: Mirror
  80: Power outlet
  81: Sink
  82: Apple
  83: Air Conditioner
  84: Knife
  85: Hockey Stick
  86: Paddle
  87: Pickup Truck
  88: Fork
  89: Traffic Sign
  90: Balloon
  91: Tripod
  92: Dog
  93: Spoon
  94: Clock
  95: Pot
  96: Cow
  97: Cake
  98: Dinning Table
  99: Sheep
  100: Hanger
  101: Blackboard/Whiteboard
  102: Napkin
  103: Other Fish
  104: Orange/Tangerine
  105: Toiletry
  106: Keyboard
  107: Tomato
  108: Lantern
  109: Machinery Vehicle
  110: Fan
  111: Green Vegetables
  112: Banana
  113: Baseball Glove
  114: Airplane
  115: Mouse
  116: Train
  117: Pumpkin
  118: Soccer
  119: Skiboard
  120: Luggage
  121: Nightstand
  122: Tea pot
  123: Telephone
  124: Trolley
  125: Head Phone
  126: Sports Car
  127: Stop Sign
  128: Dessert
  129: Scooter
  130: Stroller
  131: Crane
  132: Remote
  133: Refrigerator
  134: Oven
  135: Lemon
  136: Duck
  137: Baseball Bat
  138: Surveillance Camera
  139: Cat
  140: Jug
  141: Broccoli
  142: Piano
  143: Pizza
  144: Elephant
  145: Skateboard
  146: Surfboard
  147: Gun
  148: Skating and Skiing shoes
  149: Gas stove
  150: Donut
  151: Bow Tie
  152: Carrot
  153: Toilet
  154: Kite
  155: Strawberry
  156: Other Balls
  157: Shovel
  158: Pepper
  159: Computer Box
  160: Toilet Paper
  161: Cleaning Products
  162: Chopsticks
  163: Microwave
  164: Pigeon
  165: Baseball
  166: Cutting/chopping Board
  167: Coffee Table
  168: Side Table
  169: Scissors
  170: Marker
  171: Pie
  172: Ladder
  173: Snowboard
  174: Cookies
  175: Radiator
  176: Fire Hydrant
  177: Basketball
  178: Zebra
  179: Grape
  180: Giraffe
  181: Potato
  182: Sausage
  183: Tricycle
  184: Violin
  185: Egg
  186: Fire Extinguisher
  187: Candy
  188: Fire Truck
  189: Billiards
  190: Converter
  191: Bathtub
  192: Wheelchair
  193: Golf Club
  194: Briefcase
  195: Cucumber
  196: Cigar/Cigarette
  197: Paint Brush
  198: Pear
  199: Heavy Truck
  200: Hamburger
  201: Extractor
  202: Extension Cord
  203: Tong
  204: Tennis Racket
  205: Folder
  206: American Football
  207: earphone
  208: Mask
  209: Kettle
  210: Tennis
  211: Ship
  212: Swing
  213: Coffee Machine
  214: Slide
  215: Carriage
  216: Onion
  217: Green beans
  218: Projector
  219: Frisbee
  220: Washing Machine/Drying Machine
  221: Chicken
  222: Printer
  223: Watermelon
  224: Saxophone
  225: Tissue
  226: Toothbrush
  227: Ice cream
  228: Hot-air balloon
  229: Cello
  230: French Fries
  231: Scale
  232: Trophy
  233: Cabbage
  234: Hot dog
  235: Blender
  236: Peach
  237: Rice
  238: Wallet/Purse
  239: Volleyball
  240: Deer
  241: Goose
  242: Tape
  243: Tablet
  244: Cosmetics
  245: Trumpet
  246: Pineapple
  247: Golf Ball
  248: Ambulance
  249: Parking meter
  250: Mango
  251: Key
  252: Hurdle
  253: Fishing Rod
  254: Medal
  255: Flute
  256: Brush
  257: Penguin
  258: Megaphone
  259: Corn
  260: Lettuce
  261: Garlic
  262: Swan
  263: Helicopter
  264: Green Onion
  265: Sandwich
  266: Nuts
  267: Speed Limit Sign
  268: Induction Cooker
  269: Broom
  270: Trombone
  271: Plum
  272: Rickshaw
  273: Goldfish
  274: Kiwi fruit
  275: Router/modem
  276: Poker Card
  277: Toaster
  278: Shrimp
  279: Sushi
  280: Cheese
  281: Notepaper
  282: Cherry
  283: Pliers
  284: CD
  285: Pasta
  286: Hammer
  287: Cue
  288: Avocado
  289: Hamimelon
  290: Flask
  291: Mushroom
  292: Screwdriver
  293: Soap
  294: Recorder
  295: Bear
  296: Eggplant
  297: Board Eraser
  298: Coconut
  299: Tape Measure/Ruler
  300: Pig
  301: Showerhead
  302: Globe
  303: Chips
  304: Steak
  305: Crosswalk Sign
  306: Stapler
  307: Camel
  308: Formula 1
  309: Pomegranate
  310: Dishwasher
  311: Crab
  312: Hoverboard
  313: Meat ball
  314: Rice Cooker
  315: Tuba
  316: Calculator
  317: Papaya
  318: Antelope
  319: Parrot
  320: Seal
  321: Butterfly
  322: Dumbbell
  323: Donkey
  324: Lion
  325: Urinal
  326: Dolphin
  327: Electric Drill
  328: Hair Dryer
  329: Egg tart
  330: Jellyfish
  331: Treadmill
  332: Lighter
  333: Grapefruit
  334: Game board
  335: Mop
  336: Radish
  337: Baozi
  338: Target
  339: French
  340: Spring Rolls
  341: Monkey
  342: Rabbit
  343: Pencil Case
  344: Yak
  345: Red Cabbage
  346: Binoculars
  347: Asparagus
  348: Barbell
  349: Scallop
  350: Noddles
  351: Comb
  352: Dumpling
  353: Oyster
  354: Table Tennis paddle
  355: Cosmetics Brush/Eyeliner Pencil
  356: Chainsaw
  357: Eraser
  358: Lobster
  359: Durian
  360: Okra
  361: Lipstick
  362: Cosmetics Mirror
  363: Curling
  364: Table Tennis


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from tqdm import tqdm

  from utils.general import Path, check_requirements, download, np, xyxy2xywhn

  check_requirements(('pycocotools>=2.0',))
  from pycocotools.coco import COCO

  # Make Directories
  dir = Path(yaml['path'])  # dataset root dir
  for p in 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)
      for q in 'train', 'val':
          (dir / p / q).mkdir(parents=True, exist_ok=True)

  # Train, Val Splits
  for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
      print(f"Processing {split} in {patches} patches ...")
      images, labels = dir / 'images' / split, dir / 'labels' / split

      # Download
      url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
      if split == 'train':
          download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False)  # annotations json
          download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
      elif split == 'val':
          download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False)  # annotations json
          download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
          download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)

      # Move
      for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
          f.rename(images / f.name)  # move to /images/{split}

      # Labels
      coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
      names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
      for cid, cat in enumerate(names):
          catIds = coco.getCatIds(catNms=[cat])
          imgIds = coco.getImgIds(catIds=catIds)
          for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
              width, height = im["width"], im["height"]
              path = Path(im["file_name"])  # image filename
              try:
                  with open(labels / path.with_suffix('.txt').name, 'a') as file:
                      annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                      for a in coco.loadAnns(annIds):
                          x, y, w, h = a['bbox']  # bounding box in xywh (xy top-left corner)
                          xyxy = np.array([x, y, x + w, y + h])[None]  # pixels(1,4)
                          x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0]  # normalized and clipped
                          file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
              except Exception as e:
                  print(e)
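The label loop relies on xyxy2xywhn to turn a pixel-space corner box into normalized, clipped center coordinates. A minimal NumPy re-implementation of that mapping, as an illustration of the math rather than the utils.general source:

import numpy as np

def xyxy_to_xywhn(xyxy, w, h, clip=True):
    # pixel (x1, y1, x2, y2) rows -> normalized (x_center, y_center, width, height)
    xyxy = np.asarray(xyxy, dtype=float)
    if clip:
        xyxy[:, [0, 2]] = xyxy[:, [0, 2]].clip(0, w)  # clip x to image width
        xyxy[:, [1, 3]] = xyxy[:, [1, 3]].clip(0, h)  # clip y to image height
    out = np.empty_like(xyxy)
    out[:, 0] = (xyxy[:, 0] + xyxy[:, 2]) / 2 / w  # x center
    out[:, 1] = (xyxy[:, 1] + xyxy[:, 3]) / 2 / h  # y center
    out[:, 2] = (xyxy[:, 2] - xyxy[:, 0]) / w  # width
    out[:, 3] = (xyxy[:, 3] - xyxy[:, 1]) / h  # height
    return out

print(xyxy_to_xywhn([[10, 20, 110, 220]], w=640, h=480))
# [[0.09375    0.25       0.15625    0.41666667]]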
data/SKU-110K.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
# Example usage: python train.py --data SKU-110K.yaml
# parent
# ├── yolov5
# └── datasets
#     └── SKU-110K ← downloads here (13.6 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/SKU-110K  # dataset root dir
train: train.txt  # train images (relative to 'path') 8219 images
val: val.txt  # val images (relative to 'path') 588 images
test: test.txt  # test images (optional) 2936 images

# Classes
names:
  0: object


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import shutil
  from tqdm import tqdm
  from utils.general import np, pd, Path, download, xyxy2xywh


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  parent = Path(dir.parent)  # download dir
  urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
  download(urls, dir=parent, delete=False)

  # Rename directories
  if dir.exists():
      shutil.rmtree(dir)
  (parent / 'SKU110K_fixed').rename(dir)  # rename dir
  (dir / 'labels').mkdir(parents=True, exist_ok=True)  # create labels dir

  # Convert labels
  names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height'  # column names
  for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
      x = pd.read_csv(dir / 'annotations' / d, names=names).values  # annotations
      images, unique_images = x[:, 0], np.unique(x[:, 0])
      with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
          f.writelines(f'./images/{s}\n' for s in unique_images)
      for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
          cls = 0  # single-class dataset
          with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
              for r in x[images == im]:
                  w, h = r[6], r[7]  # image width, height
                  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0]  # instance
                  f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n")  # write label
data/VOC.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
# Example usage: python train.py --data VOC.yaml
# parent
# ├── yolov5
# └── datasets
#     └── VOC ← downloads here (2.8 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: /home/qfs/WorkSpace/ps_tamper/yolov5_ps/VOCdevkit
train: # train images (relative to 'path') 16551 images
  - images/train
val: # val images (relative to 'path') 4952 images
  - images/val
test: # test images (optional)
  - images/val

# Classes
names:
  0: tampered
  #1: bicycle
  #2: bird
  #3: boat
  #4: bottle
  #5: bus
  #6: car
  #7: cat
  #8: chair
  #9: cow
  #10: diningtable
  #11: dog
  #12: horse
  #13: motorbike
  #14: person
  #15: pottedplant
  #16: sheep
  #17: sofa
  #18: train
  #19: tvmonitor


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  import xml.etree.ElementTree as ET

  from tqdm import tqdm
  from utils.general import download, Path


  def convert_label(path, lb_path, year, image_id):
      def convert_box(size, box):
          dw, dh = 1. / size[0], 1. / size[1]
          x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
          return x * dw, y * dh, w * dw, h * dh

      in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
      out_file = open(lb_path, 'w')
      tree = ET.parse(in_file)
      root = tree.getroot()
      size = root.find('size')
      w = int(size.find('width').text)
      h = int(size.find('height').text)

      names = list(yaml['names'].values())  # names list
      for obj in root.iter('object'):
          cls = obj.find('name').text
          if cls in names and int(obj.find('difficult').text) != 1:
              xmlbox = obj.find('bndbox')
              bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
              cls_id = names.index(cls)  # class id
              out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
  urls = [f'{url}VOCtrainval_06-Nov-2007.zip',  # 446MB, 5012 images
          f'{url}VOCtest_06-Nov-2007.zip',  # 438MB, 4953 images
          f'{url}VOCtrainval_11-May-2012.zip']  # 1.95GB, 17126 images
  download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)

  # Convert
  path = dir / 'images/VOCdevkit'
  for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
      imgs_path = dir / 'images' / f'{image_set}{year}'
      lbs_path = dir / 'labels' / f'{image_set}{year}'
      imgs_path.mkdir(exist_ok=True, parents=True)
      lbs_path.mkdir(exist_ok=True, parents=True)

      with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
          image_ids = f.read().strip().split()
      for id in tqdm(image_ids, desc=f'{image_set}{year}'):
          f = path / f'VOC{year}/JPEGImages/{id}.jpg'  # old img path
          lb_path = (lbs_path / f.name).with_suffix('.txt')  # new label path
          f.rename(imgs_path / f.name)  # move image
          convert_label(path, lb_path, year, id)  # convert labels to YOLO format
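Note the box order passed to convert_box: the XML fields are read as (xmin, xmax, ymin, ymax), so box[0] + box[1] is the x extent and box[2] + box[3] the y extent, and the -1 compensates for VOC's 1-based pixel coordinates. A standalone check of the formula with a made-up 640×480 annotation:

def convert_box(size, box):
    # box = (xmin, xmax, ymin, ymax) in pixels -> normalized YOLO (x, y, w, h)
    dw, dh = 1. / size[0], 1. / size[1]
    x, y = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1
    w, h = box[1] - box[0], box[3] - box[2]
    return x * dw, y * dh, w * dw, h * dh

# xmin=100, xmax=300, ymin=50, ymax=250 in a 640x480 image
print(convert_box((640, 480), (100, 300, 50, 250)))
# -> (0.3109375, 0.31041..., 0.3125, 0.41666...)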
data/VisDrone.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
# Example usage: python train.py --data VisDrone.yaml
# parent
# ├── yolov5
# └── datasets
#     └── VisDrone ← downloads here (2.3 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/VisDrone  # dataset root dir
train: VisDrone2019-DET-train/images  # train images (relative to 'path') 6471 images
val: VisDrone2019-DET-val/images  # val images (relative to 'path') 548 images
test: VisDrone2019-DET-test-dev/images  # test images (optional) 1610 images

# Classes
names:
  0: pedestrian
  1: people
  2: bicycle
  3: car
  4: van
  5: truck
  6: tricycle
  7: awning-tricycle
  8: bus
  9: motor


# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
  from utils.general import download, os, Path

  def visdrone2yolo(dir):
      from PIL import Image
      from tqdm import tqdm

      def convert_box(size, box):
          # Convert VisDrone box to YOLO xywh box
          dw = 1. / size[0]
          dh = 1. / size[1]
          return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

      (dir / 'labels').mkdir(parents=True, exist_ok=True)  # make labels directory
      pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
      for f in pbar:
          img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
          lines = []
          with open(f, 'r') as file:  # read annotation.txt
              for row in [x.split(',') for x in file.read().strip().splitlines()]:
                  if row[4] == '0':  # VisDrone 'ignored regions' class 0
                      continue
                  cls = int(row[5]) - 1
                  box = convert_box(img_size, tuple(map(int, row[:4])))
                  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
          with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
              fl.writelines(lines)  # write label.txt


  # Download
  dir = Path(yaml['path'])  # dataset root dir
  urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
  download(urls, dir=dir, curl=True, threads=4)

  # Convert
  for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
      visdrone2yolo(dir / d)  # convert VisDrone annotations to YOLO labels
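Each VisDrone annotation row is x,y,w,h,score,category,...; rows whose score field is 0 mark "ignored regions", and the 1-based category is shifted to a 0-based class id. A small sketch of that per-row logic on made-up rows (my illustration, not the script above):

def parse_visdrone_row(row, img_w, img_h):
    # 'x,y,w,h,score,category,...' -> (cls, x_c, y_c, w, h) normalized, or None if ignored
    v = row.split(',')
    if v[4] == '0':  # score 0 marks a VisDrone 'ignored region'
        return None
    x, y, w, h = map(int, v[:4])  # top-left corner box in pixels
    cls = int(v[5]) - 1  # 1-based VisDrone category -> 0-based YOLO class
    return cls, (x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h

print(parse_visdrone_row('684,8,273,116,0,0,0,0', 1360, 765))  # None (ignored region)
print(parse_visdrone_row('100,200,50,40,1,4,0,0', 1360, 765))  # (3, 0.0919..., 0.2875..., 0.0367..., 0.0522...)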
data/coco.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# COCO 2017 dataset http://cocodataset.org by Microsoft
# Example usage: python train.py --data coco.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco ← downloads here (20.1 GB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco  # dataset root dir
train: train2017.txt  # train images (relative to 'path') 118287 images
val: val2017.txt  # val images (relative to 'path') 5000 images
test: test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: |
  from utils.general import download, Path


  # Download labels
  segments = False  # segment or box labels
  dir = Path(yaml['path'])  # dataset root dir
  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')]  # labels
  download(urls, dir=dir.parent)

  # Download data
  urls = ['http://images.cocodataset.org/zips/train2017.zip',  # 19G, 118k images
          'http://images.cocodataset.org/zips/val2017.zip',  # 1G, 5k images
          'http://images.cocodataset.org/zips/test2017.zip']  # 7G, 41k images (optional)
  download(urls, dir=dir / 'images', threads=3)
data/coco128-seg.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco128-seg ← downloads here (7 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco128-seg  # dataset root dir
train: images/train2017  # train images (relative to 'path') 128 images
val: images/train2017  # val images (relative to 'path') 128 images
test: # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco128-seg.zip
data/coco128.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: python train.py --data coco128.yaml
# parent
# ├── yolov5
# └── datasets
#     └── coco128 ← downloads here (7 MB)


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco128  # dataset root dir
train: images/train2017  # train images (relative to 'path') 128 images
val: images/train2017  # val images (relative to 'path') 128 images
test: # test images (optional)

# Classes
names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  9: traffic light
  10: fire hydrant
  11: stop sign
  12: parking meter
  13: bench
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow
  20: elephant
  21: bear
  22: zebra
  23: giraffe
  24: backpack
  25: umbrella
  26: handbag
  27: tie
  28: suitcase
  29: frisbee
  30: skis
  31: snowboard
  32: sports ball
  33: kite
  34: baseball bat
  35: baseball glove
  36: skateboard
  37: surfboard
  38: tennis racket
  39: bottle
  40: wine glass
  41: cup
  42: fork
  43: knife
  44: spoon
  45: bowl
  46: banana
  47: apple
  48: sandwich
  49: orange
  50: broccoli
  51: carrot
  52: hot dog
  53: pizza
  54: donut
  55: cake
  56: chair
  57: couch
  58: potted plant
  59: bed
  60: dining table
  61: toilet
  62: tv
  63: laptop
  64: mouse
  65: remote
  66: keyboard
  67: cell phone
  68: microwave
  69: oven
  70: toaster
  71: sink
  72: refrigerator
  73: book
  74: clock
  75: vase
  76: scissors
  77: teddy bear
  78: hair drier
  79: toothbrush


# Download script/URL (optional)
download: https://ultralytics.com/assets/coco128.zip
data/hyps/hyp.Objects365.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Hyperparameters for Objects365 training
# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials

lr0: 0.00258
lrf: 0.17
momentum: 0.779
weight_decay: 0.00058
warmup_epochs: 1.33
warmup_momentum: 0.86
warmup_bias_lr: 0.0711
box: 0.0539
cls: 0.299
cls_pw: 0.825
obj: 0.632
obj_pw: 1.0
iou_t: 0.2
anchor_t: 3.44
anchors: 3.2
fl_gamma: 0.0
hsv_h: 0.0188
hsv_s: 0.704
hsv_v: 0.36
degrees: 0.0
translate: 0.0902
scale: 0.491
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
mosaic: 1.0
mixup: 0.0
copy_paste: 0.0
data/hyps/hyp.VOC.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Hyperparameters for VOC training
# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials

# YOLOv5 Hyperparameter Evolution Results
# Best generation: 467
# Last generation: 996
# metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
# 0.87729,           0.85125,        0.91286,         0.72664,              0.0076739,    0.0042529,    0.0013865

lr0: 0.00334
lrf: 0.15135
momentum: 0.74832
weight_decay: 0.00025
warmup_epochs: 3.3835
warmup_momentum: 0.59462
warmup_bias_lr: 0.18657
box: 0.02
cls: 0.21638
cls_pw: 0.5
obj: 0.51728
obj_pw: 0.67198
iou_t: 0.2
anchor_t: 3.3744
fl_gamma: 0.0
hsv_h: 0.01041
hsv_s: 0.54703
hsv_v: 0.27739
degrees: 0.0
translate: 0.04591
scale: 0.75544
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
mosaic: 0.85834
mixup: 0.04266
copy_paste: 0.0
anchors: 3.412
data/hyps/hyp.scratch-high.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Hyperparameters for high-augmentation COCO training from scratch
# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials

lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.3  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 0.7  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.1  # image translation (+/- fraction)
scale: 0.9  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.1  # image mixup (probability)
copy_paste: 0.1  # segment copy-paste (probability)
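In these hyp files lrf is a multiplier rather than an absolute rate: the schedule decays the learning rate from lr0 toward lr0 * lrf. A quick sanity check of the values above, using the one-cycle cosine shape from YOLOv5's train.py (the formula here is my sketch of it, and epochs=300 is assumed from the comment in this file):

import math

lr0, lrf, epochs = 0.01, 0.1, 300  # lr0, lrf from hyp.scratch-high.yaml; epochs assumed

lf = lambda x: ((1 - math.cos(x * math.pi / epochs)) / 2) * (lrf - 1) + 1  # one-cycle factor

print(lr0 * lf(0))       # 0.01  -> schedule starts at lr0
print(lr0 * lf(epochs))  # ~0.001 -> ends at lr0 * lrf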
data/hyps/hyp.scratch-low.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Hyperparameters for low-augmentation COCO training from scratch
# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials

lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.01  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.5  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 1.0  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.1  # image translation (+/- fraction)
scale: 0.5  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.0  # image mixup (probability)
copy_paste: 0.0  # segment copy-paste (probability)
data/hyps/hyp.scratch-med.yaml
0 → 100644
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Hyperparameters for medium-augmentation COCO training from scratch
# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials

lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937  # SGD momentum/Adam beta1
weight_decay: 0.0005  # optimizer weight decay 5e-4
warmup_epochs: 3.0  # warmup epochs (fractions ok)
warmup_momentum: 0.8  # warmup initial momentum
warmup_bias_lr: 0.1  # warmup initial bias lr
box: 0.05  # box loss gain
cls: 0.3  # cls loss gain
cls_pw: 1.0  # cls BCELoss positive_weight
obj: 0.7  # obj loss gain (scale with pixels)
obj_pw: 1.0  # obj BCELoss positive_weight
iou_t: 0.20  # IoU training threshold
anchor_t: 4.0  # anchor-multiple threshold
# anchors: 3  # anchors per output layer (0 to ignore)
fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4  # image HSV-Value augmentation (fraction)
degrees: 0.0  # image rotation (+/- deg)
translate: 0.1  # image translation (+/- fraction)
scale: 0.9  # image scale (+/- gain)
shear: 0.0  # image shear (+/- deg)
perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
flipud: 0.0  # image flip up-down (probability)
fliplr: 0.5  # image flip left-right (probability)
mosaic: 1.0  # image mosaic (probability)
mixup: 0.1  # image mixup (probability)
copy_paste: 0.0  # segment copy-paste (probability)
data/images/img.png
0 → 100644

698 KB
data/images/img_1.png
0 → 100644

249 KB
data/images/img_2.png
0 → 100644

222 KB
data/scripts/download_weights.sh
0 → 100755
1 | #!/bin/bash | ||
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
3 | # Download latest models from https://github.com/ultralytics/yolov5/releases | ||
4 | # Example usage: bash data/scripts/download_weights.sh | ||
5 | # parent | ||
6 | # └── yolov5 | ||
7 | # ├── yolov5s.pt ← downloads here | ||
8 | # ├── yolov5m.pt | ||
9 | # └── ... | ||
10 | |||
11 | python - <<EOF | ||
12 | from utils.downloads import attempt_download | ||
13 | |||
14 | p5 = ['n', 's', 'm', 'l', 'x'] # P5 models | ||
15 | p6 = [f'{x}6' for x in p5] # P6 models | ||
16 | cls = [f'{x}-cls' for x in p5] # classification models | ||
17 | |||
18 | for x in p5 + p6 + cls: | ||
19 | attempt_download(f'weights/yolov5{x}.pt') | ||
20 | |||
21 | EOF |
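If only a single checkpoint is needed, the helper that the heredoc wraps can also be called directly; a minimal sketch, assuming it is run from the repository root so utils/ is importable:

# Download one checkpoint with the same helper download_weights.sh uses.
from utils.downloads import attempt_download

attempt_download('weights/yolov5s.pt')  # fetched from the GitHub releases page if not present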
data/scripts/get_coco.sh
0 → 100755
1 | #!/bin/bash | ||
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
3 | # Download COCO 2017 dataset http://cocodataset.org | ||
4 | # Example usage: bash data/scripts/get_coco.sh | ||
5 | # parent | ||
6 | # ├── yolov5 | ||
7 | # └── datasets | ||
8 | # └── coco ← downloads here | ||
9 | |||
10 | # Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments | ||
11 | if [ "$#" -gt 0 ]; then | ||
12 | for opt in "$@"; do | ||
13 | case "${opt}" in | ||
14 | --train) train=true ;; | ||
15 | --val) val=true ;; | ||
16 | --test) test=true ;; | ||
17 | --segments) segments=true ;; | ||
18 | esac | ||
19 | done | ||
20 | else | ||
21 | train=true | ||
22 | val=true | ||
23 | test=false | ||
24 | segments=false | ||
25 | fi | ||
26 | |||
27 | # Download/unzip labels | ||
28 | d='../datasets' # unzip directory | ||
29 | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ | ||
30 | if [ "$segments" == "true" ]; then | ||
31 | f='coco2017labels-segments.zip' # 168 MB | ||
32 | else | ||
33 | f='coco2017labels.zip' # 168 MB | ||
34 | fi | ||
35 | echo 'Downloading' $url$f ' ...' | ||
36 | curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & | ||
37 | |||
38 | # Download/unzip images | ||
39 | d='../datasets/coco/images' # unzip directory | ||
40 | url=http://images.cocodataset.org/zips/ | ||
41 | if [ "$train" == "true" ]; then | ||
42 | f='train2017.zip' # 19G, 118k images | ||
43 | echo 'Downloading' $url$f '...' | ||
44 | curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & | ||
45 | fi | ||
46 | if [ "$val" == "true" ]; then | ||
47 | f='val2017.zip' # 1G, 5k images | ||
48 | echo 'Downloading' $url$f '...' | ||
49 | curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & | ||
50 | fi | ||
51 | if [ "$test" == "true" ]; then | ||
52 | f='test2017.zip' # 7G, 41k images (optional) | ||
53 | echo 'Downloading' $url$f '...' | ||
54 | curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & | ||
55 | fi | ||
56 | wait # finish background tasks |
data/scripts/get_coco128.sh
0 → 100755
1 | #!/bin/bash | ||
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
3 | # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) | ||
4 | # Example usage: bash data/scripts/get_coco128.sh | ||
5 | # parent | ||
6 | # ├── yolov5 | ||
7 | # └── datasets | ||
8 | # └── coco128 ← downloads here | ||
9 | |||
10 | # Download/unzip images and labels | ||
11 | d='../datasets' # unzip directory | ||
12 | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ | ||
13 | f='coco128.zip' # or 'coco128-segments.zip', 68 MB | ||
14 | echo 'Downloading' $url$f ' ...' | ||
15 | curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & | ||
16 | |||
17 | wait # finish background tasks |
data/scripts/get_imagenet.sh
0 → 100755
1 | #!/bin/bash | ||
2 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
3 | # Download ILSVRC2012 ImageNet dataset https://image-net.org | ||
4 | # Example usage: bash data/scripts/get_imagenet.sh | ||
5 | # parent | ||
6 | # ├── yolov5 | ||
7 | # └── datasets | ||
8 | # └── imagenet ← downloads here | ||
9 | |||
10 | # Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val | ||
11 | if [ "$#" -gt 0 ]; then | ||
12 | for opt in "$@"; do | ||
13 | case "${opt}" in | ||
14 | --train) train=true ;; | ||
15 | --val) val=true ;; | ||
16 | esac | ||
17 | done | ||
18 | else | ||
19 | train=true | ||
20 | val=true | ||
21 | fi | ||
22 | |||
23 | # Make dir | ||
24 | d='../datasets/imagenet' # unzip directory | ||
25 | mkdir -p $d && cd $d | ||
26 | |||
27 | # Download/unzip train | ||
28 | if [ "$train" == "true" ]; then | ||
29 | wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images | ||
30 | mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train | ||
31 | tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar | ||
32 | find . -name "*.tar" | while read NAME; do | ||
33 | mkdir -p "${NAME%.tar}" | ||
34 | tar -xf "${NAME}" -C "${NAME%.tar}" | ||
35 | rm -f "${NAME}" | ||
36 | done | ||
37 | cd .. | ||
38 | fi | ||
39 | |||
40 | # Download/unzip val | ||
41 | if [ "$val" == "true" ]; then | ||
42 | wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images | ||
43 | mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar | ||
44 | wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs | ||
45 | fi | ||
46 | |||
47 | # Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail) | ||
48 | # rm train/n04266014/n04266014_10835.JPEG | ||
49 | |||
50 | # TFRecords (optional) | ||
51 | # wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt |
data/xView.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) | ||
3 | # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- | ||
4 | # Example usage: python train.py --data xView.yaml | ||
5 | # parent | ||
6 | # ├── yolov5 | ||
7 | # └── datasets | ||
8 | # └── xView ← downloads here (20.7 GB) | ||
9 | |||
10 | |||
11 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] | ||
12 | path: ../datasets/xView # dataset root dir | ||
13 | train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images | ||
14 | val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images | ||
15 | |||
16 | # Classes | ||
17 | names: | ||
18 | 0: Fixed-wing Aircraft | ||
19 | 1: Small Aircraft | ||
20 | 2: Cargo Plane | ||
21 | 3: Helicopter | ||
22 | 4: Passenger Vehicle | ||
23 | 5: Small Car | ||
24 | 6: Bus | ||
25 | 7: Pickup Truck | ||
26 | 8: Utility Truck | ||
27 | 9: Truck | ||
28 | 10: Cargo Truck | ||
29 | 11: Truck w/Box | ||
30 | 12: Truck Tractor | ||
31 | 13: Trailer | ||
32 | 14: Truck w/Flatbed | ||
33 | 15: Truck w/Liquid | ||
34 | 16: Crane Truck | ||
35 | 17: Railway Vehicle | ||
36 | 18: Passenger Car | ||
37 | 19: Cargo Car | ||
38 | 20: Flat Car | ||
39 | 21: Tank car | ||
40 | 22: Locomotive | ||
41 | 23: Maritime Vessel | ||
42 | 24: Motorboat | ||
43 | 25: Sailboat | ||
44 | 26: Tugboat | ||
45 | 27: Barge | ||
46 | 28: Fishing Vessel | ||
47 | 29: Ferry | ||
48 | 30: Yacht | ||
49 | 31: Container Ship | ||
50 | 32: Oil Tanker | ||
51 | 33: Engineering Vehicle | ||
52 | 34: Tower crane | ||
53 | 35: Container Crane | ||
54 | 36: Reach Stacker | ||
55 | 37: Straddle Carrier | ||
56 | 38: Mobile Crane | ||
57 | 39: Dump Truck | ||
58 | 40: Haul Truck | ||
59 | 41: Scraper/Tractor | ||
60 | 42: Front loader/Bulldozer | ||
61 | 43: Excavator | ||
62 | 44: Cement Mixer | ||
63 | 45: Ground Grader | ||
64 | 46: Hut/Tent | ||
65 | 47: Shed | ||
66 | 48: Building | ||
67 | 49: Aircraft Hangar | ||
68 | 50: Damaged Building | ||
69 | 51: Facility | ||
70 | 52: Construction Site | ||
71 | 53: Vehicle Lot | ||
72 | 54: Helipad | ||
73 | 55: Storage Tank | ||
74 | 56: Shipping container lot | ||
75 | 57: Shipping Container | ||
76 | 58: Pylon | ||
77 | 59: Tower | ||
78 | |||
79 | |||
80 | # Download script/URL (optional) --------------------------------------------------------------------------------------- | ||
81 | download: | | ||
82 | import json | ||
83 | import os | ||
84 | from pathlib import Path | ||
85 | |||
86 | import numpy as np | ||
87 | from PIL import Image | ||
88 | from tqdm import tqdm | ||
89 | |||
90 | from utils.datasets import autosplit | ||
91 | from utils.general import download, xyxy2xywhn | ||
92 | |||
93 | |||
94 | def convert_labels(fname=Path('xView/xView_train.geojson')): | ||
95 | # Convert xView geoJSON labels to YOLO format | ||
96 | path = fname.parent | ||
97 | with open(fname) as f: | ||
98 | print(f'Loading {fname}...') | ||
99 | data = json.load(f) | ||
100 | |||
101 | # Make dirs | ||
102 | labels = Path(path / 'labels' / 'train') | ||
103 | os.system(f'rm -rf {labels}') | ||
104 | labels.mkdir(parents=True, exist_ok=True) | ||
105 | |||
106 | # xView classes 11-94 to 0-59 | ||
107 | xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11, | ||
108 | 12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1, | ||
109 | 29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46, | ||
110 | 47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59] | ||
111 | |||
112 | shapes = {} | ||
113 | for feature in tqdm(data['features'], desc=f'Converting {fname}'): | ||
114 | p = feature['properties'] | ||
115 | if p['bounds_imcoords']: | ||
116 | id = p['image_id'] | ||
117 | file = path / 'train_images' / id | ||
118 | if file.exists(): # 1395.tif missing | ||
119 | try: | ||
120 | box = np.array([int(num) for num in p['bounds_imcoords'].split(",")]) | ||
121 | assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}' | ||
122 | cls = p['type_id'] | ||
123 | cls = xview_class2index[int(cls)] # xView class to 0-60 | ||
124 | assert 59 >= cls >= 0, f'incorrect class index {cls}' | ||
125 | |||
126 | # Write YOLO label | ||
127 | if id not in shapes: | ||
128 | shapes[id] = Image.open(file).size | ||
129 | box = xyxy2xywhn(box[None].astype(np.float64), w=shapes[id][0], h=shapes[id][1], clip=True) # np.float was removed in NumPy 1.24+ | ||
130 | with open((labels / id).with_suffix('.txt'), 'a') as f: | ||
131 | f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt | ||
132 | except Exception as e: | ||
133 | print(f'WARNING: skipping one label for {file}: {e}') | ||
134 | |||
135 | |||
136 | # Download manually from https://challenge.xviewdataset.org | ||
137 | dir = Path(yaml['path']) # dataset root dir | ||
138 | # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels | ||
139 | # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images | ||
140 | # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) | ||
141 | # download(urls, dir=dir, delete=False) | ||
142 | |||
143 | # Convert labels | ||
144 | convert_labels(dir / 'xView_train.geojson') | ||
145 | |||
146 | # Move images | ||
147 | images = Path(dir / 'images') | ||
148 | images.mkdir(parents=True, exist_ok=True) | ||
149 | Path(dir / 'train_images').rename(dir / 'images' / 'train') | ||
150 | Path(dir / 'val_images').rename(dir / 'images' / 'val') | ||
151 | |||
152 | # Split | ||
153 | autosplit(dir / 'images' / 'train') |
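The conversion above relies on xyxy2xywhn to turn corner coordinates into the normalized center/size format YOLO labels use. The same arithmetic as a standalone sketch (box and image dimensions are hypothetical, not taken from xView):

# xyxy -> normalized xywh, the per-box transform convert_labels applies.
def xyxy_to_xywhn(x1, y1, x2, y2, w, h):
    xc = (x1 + x2) / 2 / w  # normalized box center x
    yc = (y1 + y2) / 2 / h  # normalized box center y
    bw = (x2 - x1) / w      # normalized box width
    bh = (y2 - y1) / h      # normalized box height
    return xc, yc, bw, bh

print(xyxy_to_xywhn(100, 200, 300, 400, w=1000, h=800))  # (0.2, 0.375, 0.2, 0.25)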
detect.py
0 → 100644
(diff collapsed; file contents not shown)
export.py
0 → 100644
(diff collapsed; file contents not shown)
get_pr.py
0 → 100644
1 | import os | ||
2 | |||
3 | import cv2 | ||
4 | import numpy as np | ||
5 | from sklearn.metrics import precision_score, recall_score, confusion_matrix | ||
6 | |||
7 | |||
8 | def iou(box, boxes): | ||
9 | x1, y1, x2, y2 = box | ||
10 | x1s, y1s, x2s, y2s = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] | ||
11 | area1 = abs(x2 - x1) * abs(y2 - y1) | ||
12 | areas = (x2s - x1s) * (y2s - y1s) | ||
13 | xx1 = np.maximum(x1, x1s) | ||
14 | yy1 = np.maximum(y1, y1s) | ||
15 | xx2 = np.minimum(x2, x2s) | ||
16 | yy2 = np.minimum(y2, y2s) | ||
17 | inner = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1) # clamp width and height separately so disjoint boxes give zero overlap | ||
18 | return inner / (area1 + areas - inner) | ||
19 | |||
20 | |||
21 | def get_evaluate_score(true_image_path, true_label_path, predict_label_path, threshold): | ||
22 | true_labels = os.listdir(true_label_path) | ||
23 | predict_labels = os.listdir(predict_label_path) | ||
24 | targets, predicts = [], [] | ||
25 | for label in true_labels: | ||
26 | true_label = open(os.path.join(true_label_path, label)).readlines() | ||
27 | img = cv2.imread(os.path.join(true_image_path, label.replace('.txt', '.jpg'))) | ||
28 | h, w, c = img.shape | ||
29 | if len(true_label) == 0: | ||
30 | targets.append(0) | ||
31 | if label in predict_labels: | ||
32 | predicts.append(1) | ||
33 | else: | ||
34 | predicts.append(0) | ||
35 | |||
36 | else: | ||
37 | targets.append(1) | ||
38 | if label not in predict_labels: | ||
39 | predicts.append(0) | ||
40 | else: | ||
41 | tmp = 0 | ||
42 | predict_label = open(os.path.join(predict_label_path, label)).readlines() | ||
43 | boxes = [] | ||
44 | for pl in predict_label: | ||
45 | cls, x1, y1, w1, h1 = [float(i) for i in pl.strip().split(' ')] | ||
46 | x1, y1, w1, h1 = int(x1 * w), int(y1 * h), int(w1 * w), int(h1 * h) | ||
47 | xx1, yy1, xx2, yy2 = x1 - w1 // 2, y1 - h1 // 2, x1 + w1 // 2, y1 + h1 // 2 | ||
48 | boxes.append([xx1, yy1, xx2, yy2]) | ||
49 | for tl in true_label: | ||
50 | cls, x1, y1, w1, h1 = [float(i) for i in tl.strip().split(' ')] | ||
51 | x1, y1, w1, h1 = int(x1 * w), int(y1 * h), int(w1 * w), int(h1 * h) | ||
52 | xx1, yy1, xx2, yy2 = x1 - w1 // 2, y1 - h1 // 2, x1 + w1 // 2, y1 + h1 // 2 | ||
53 | box1 = [xx1, yy1, xx2, yy2] | ||
54 | inner_score = iou(np.array(box1), np.array(boxes)) | ||
55 | if max(inner_score) > threshold: | ||
56 | tmp = 1 | ||
57 | predicts.append(1) | ||
58 | break | ||
59 | if tmp == 0: | ||
60 | predicts.append(0) | ||
61 | p = precision_score(targets, predicts) | ||
62 | r = recall_score(targets, predicts) | ||
63 | conf = confusion_matrix(targets, predicts) | ||
64 | print('precision:', p) | ||
65 | print('recall:', r) | ||
66 | print(conf) | ||
67 | print(f'                 predicted') | ||
68 | print(f'           authentic   tampered') | ||
69 | print(f'actual  authentic \t\t{conf[0, 0]} \t\t{conf[0, 1]}') | ||
70 | print(f'        tampered  \t\t{conf[1, 0]} \t\t\t{conf[1, 1]}') | ||
71 | print(f'authentic precision:{conf[0,0]/(conf[0,0]+conf[1,0])}\trecall:{conf[0, 0]/(conf[0, 0]+conf[0, 1])}') | ||
72 | print(f'tampered precision:{conf[1, 1]/(conf[0, 1]+conf[1, 1])}\trecall:{conf[1, 1]/(conf[1, 0]+conf[1, 1])}') | ||
73 | if __name__ == '__main__': | ||
74 | true_image_path = '/data/situ_invoice_bill_data/qfs_train_val_data/gongshang/images/val' | ||
75 | true_label_path = '/data/situ_invoice_bill_data/qfs_train_val_data/gongshang/labels/val' | ||
76 | predict_label_path = '/home/situ/qfs/invoice_tamper/09_project/project/tamper_det/runs/detect/exp4/labels' | ||
77 | threshold = 0.1 | ||
78 | get_evaluate_score(true_image_path, true_label_path, predict_label_path, threshold) |
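As a sanity check, the vectorized iou() above can be exercised on toy boxes (values hypothetical); a minimal sketch, assuming the repository root is on sys.path:

# Exercise get_pr.iou on one identical and one half-overlapping box.
import numpy as np
from get_pr import iou

box = np.array([0, 0, 10, 10])                       # 10x10 query box
boxes = np.array([[0, 0, 10, 10], [5, 5, 15, 15]])   # identical, then partially overlapping
print(iou(box, boxes))  # [1.0, 0.142857...] since 25 / (100 + 100 - 25)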
hubconf.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 | ||
4 | |||
5 | Usage: | ||
6 | import torch | ||
7 | model = torch.hub.load('ultralytics/yolov5', 'yolov5s') | ||
8 | model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch | ||
9 | """ | ||
10 | |||
11 | import torch | ||
12 | |||
13 | |||
14 | def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): | ||
15 | """Creates or loads a YOLOv5 model | ||
16 | |||
17 | Arguments: | ||
18 | name (str): model name 'yolov5s' or path 'path/to/best.pt' | ||
19 | pretrained (bool): load pretrained weights into the model | ||
20 | channels (int): number of input channels | ||
21 | classes (int): number of model classes | ||
22 | autoshape (bool): apply YOLOv5 .autoshape() wrapper to model | ||
23 | verbose (bool): print all information to screen | ||
24 | device (str, torch.device, None): device to use for model parameters | ||
25 | |||
26 | Returns: | ||
27 | YOLOv5 model | ||
28 | """ | ||
29 | from pathlib import Path | ||
30 | |||
31 | from models.common import AutoShape, DetectMultiBackend | ||
32 | from models.experimental import attempt_load | ||
33 | from models.yolo import ClassificationModel, DetectionModel, SegmentationModel | ||
34 | from utils.downloads import attempt_download | ||
35 | from utils.general import LOGGER, check_requirements, intersect_dicts, logging | ||
36 | from utils.torch_utils import select_device | ||
37 | |||
38 | if not verbose: | ||
39 | LOGGER.setLevel(logging.WARNING) | ||
40 | check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) | ||
41 | name = Path(name) | ||
42 | path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path | ||
43 | try: | ||
44 | device = select_device(device) | ||
45 | if pretrained and channels == 3 and classes == 80: | ||
46 | try: | ||
47 | model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model | ||
48 | if autoshape: | ||
49 | if model.pt and isinstance(model.model, ClassificationModel): | ||
50 | LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' | ||
51 | 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') | ||
52 | elif model.pt and isinstance(model.model, SegmentationModel): | ||
53 | LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' | ||
54 | 'You will not be able to run inference with this model.') | ||
55 | else: | ||
56 | model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS | ||
57 | except Exception: | ||
58 | model = attempt_load(path, device=device, fuse=False) # arbitrary model | ||
59 | else: | ||
60 | cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path | ||
61 | model = DetectionModel(cfg, channels, classes) # create model | ||
62 | if pretrained: | ||
63 | ckpt = torch.load(attempt_download(path), map_location=device) # load | ||
64 | csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 | ||
65 | csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect | ||
66 | model.load_state_dict(csd, strict=False) # load | ||
67 | if len(ckpt['model'].names) == classes: | ||
68 | model.names = ckpt['model'].names # set class names attribute | ||
69 | if not verbose: | ||
70 | LOGGER.setLevel(logging.INFO) # reset to default | ||
71 | return model.to(device) | ||
72 | |||
73 | except Exception as e: | ||
74 | help_url = 'https://github.com/ultralytics/yolov5/issues/36' | ||
75 | s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' | ||
76 | raise Exception(s) from e | ||
77 | |||
78 | |||
79 | def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): | ||
80 | # YOLOv5 custom or local model | ||
81 | return _create(path, autoshape=autoshape, verbose=_verbose, device=device) | ||
82 | |||
83 | |||
84 | def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
85 | # YOLOv5-nano model https://github.com/ultralytics/yolov5 | ||
86 | return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) | ||
87 | |||
88 | |||
89 | def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
90 | # YOLOv5-small model https://github.com/ultralytics/yolov5 | ||
91 | return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) | ||
92 | |||
93 | |||
94 | def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
95 | # YOLOv5-medium model https://github.com/ultralytics/yolov5 | ||
96 | return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) | ||
97 | |||
98 | |||
99 | def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
100 | # YOLOv5-large model https://github.com/ultralytics/yolov5 | ||
101 | return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) | ||
102 | |||
103 | |||
104 | def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
105 | # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 | ||
106 | return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) | ||
107 | |||
108 | |||
109 | def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
110 | # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 | ||
111 | return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) | ||
112 | |||
113 | |||
114 | def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
115 | # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 | ||
116 | return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) | ||
117 | |||
118 | |||
119 | def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
120 | # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 | ||
121 | return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) | ||
122 | |||
123 | |||
124 | def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
125 | # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 | ||
126 | return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) | ||
127 | |||
128 | |||
129 | def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): | ||
130 | # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 | ||
131 | return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) | ||
132 | |||
133 | |||
134 | if __name__ == '__main__': | ||
135 | import argparse | ||
136 | from pathlib import Path | ||
137 | |||
138 | import numpy as np | ||
139 | from PIL import Image | ||
140 | |||
141 | from utils.general import cv2, print_args | ||
142 | |||
143 | # Argparser | ||
144 | parser = argparse.ArgumentParser() | ||
145 | parser.add_argument('--model', type=str, default='yolov5s', help='model name') | ||
146 | opt = parser.parse_args() | ||
147 | print_args(vars(opt)) | ||
148 | |||
149 | # Model | ||
150 | model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) | ||
151 | # model = custom(path='path/to/model.pt') # custom | ||
152 | |||
153 | # Images | ||
154 | imgs = [ | ||
155 | 'data/images/zidane.jpg', # filename | ||
156 | Path('data/images/zidane.jpg'), # Path | ||
157 | 'https://ultralytics.com/images/zidane.jpg', # URI | ||
158 | cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV | ||
159 | Image.open('data/images/bus.jpg'), # PIL | ||
160 | np.zeros((320, 640, 3))] # numpy | ||
161 | |||
162 | # Inference | ||
163 | results = model(imgs, size=320) # batched inference | ||
164 | |||
165 | # Results | ||
166 | results.print() | ||
167 | results.save() |
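For this repository the relevant entry point is custom(), which routes a local checkpoint through _create(). A minimal sketch, assuming a trained checkpoint exists at a hypothetical path:

# Load a local tamper-detection checkpoint through the hub interface.
import torch

# 'runs/train/exp/weights/best.pt' is a hypothetical path, not shipped with this repo.
model = torch.hub.load('.', 'custom', path='runs/train/exp/weights/best.pt', source='local')
results = model('data/images/img.png')  # AutoShape accepts file paths directly
results.print()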
inference.py
0 → 100644
1 | import copy | ||
2 | import os | ||
3 | import sys | ||
4 | from pathlib import Path | ||
5 | import numpy as np | ||
6 | import torch | ||
7 | |||
8 | from utils.augmentations import letterbox | ||
9 | |||
10 | FILE = Path(__file__).resolve() | ||
11 | ROOT = FILE.parents[0] # YOLOv5 root directory | ||
12 | if str(ROOT) not in sys.path: | ||
13 | sys.path.append(str(ROOT)) # add ROOT to PATH | ||
14 | ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative | ||
15 | from models.common import DetectMultiBackend | ||
16 | from utils.general import (check_img_size, cv2, non_max_suppression, scale_boxes) | ||
17 | from utils.torch_utils import select_device, smart_inference_mode | ||
18 | from models.yolov5_config import config | ||
19 | |||
20 | classes = ['tampered'] | ||
21 | |||
22 | |||
23 | def gen_result_dict(boxes, label_list=[], std=False): | ||
24 | result = { | ||
25 | "error_code": 1, | ||
26 | "result": [] | ||
27 | } | ||
28 | rs_box = { | ||
29 | "class": '', | ||
30 | "score": 0, | ||
31 | "left": 0, | ||
32 | "top": 0, | ||
33 | "width": 0, | ||
34 | "height": 0 | ||
35 | } | ||
36 | |||
37 | if not label_list: | ||
38 | label_list = classes | ||
39 | |||
40 | for box in boxes: | ||
41 | result['error_code'] = 0 | ||
42 | box_dict = copy.deepcopy(rs_box) | ||
43 | if std: | ||
44 | box_dict['class'] = str(int(box[-1])) | ||
45 | else: | ||
46 | box_dict['class'] = label_list[int(box[-1])] | ||
47 | |||
48 | box_dict['left'] = int(round(box[0], 0)) | ||
49 | box_dict['top'] = int(round(box[1], 0)) | ||
50 | box_dict['width'] = int(round(box[2], 0) - round(box[0], 0)) | ||
51 | box_dict['height'] = int(round(box[3], 0) - (round(box[1], 0))) | ||
52 | box_dict['score'] = box[-2] | ||
53 | result['result'].append(box_dict) | ||
54 | return result | ||
55 | |||
56 | |||
57 | def keep_resize_padding(image): | ||
58 | h, w, c = image.shape | ||
59 | if h >= w: | ||
60 | pad1 = (h - w) // 2 | ||
61 | pad2 = h - w - pad1 | ||
62 | p1 = np.ones((h, pad1, 3)) * 114.0 | ||
63 | p2 = np.ones((h, pad2, 3)) * 114.0 | ||
64 | p1, p2 = p1.astype(np.uint8), p2.astype(np.uint8) | ||
65 | new_image = np.hstack((p1, image, p2)) | ||
66 | else: | ||
67 | pad1 = (w - h) // 2 | ||
68 | pad2 = w - h - pad1 | ||
69 | p1 = np.ones((pad1, w, 3)) * 114.0 | ||
70 | p2 = np.ones((pad2, w, 3)) * 114.0 | ||
71 | p1, p2 = p1.astype(np.uint8), p2.astype(np.uint8) | ||
72 | new_image = np.vstack((p1, image, p2)) | ||
73 | new_image = cv2.resize(new_image, (640, 640)) | ||
74 | return new_image | ||
75 | |||
76 | |||
77 | class Yolov5: | ||
78 | def __init__(self, cfg=None): | ||
79 | self.cfg = cfg | ||
80 | self.device = select_device(self.cfg.device) | ||
81 | self.model = DetectMultiBackend(self.cfg.weights, device=self.device, dnn=False, data=self.cfg.data, fp16=False) | ||
82 | |||
83 | def detect(self, image): | ||
84 | image0 = image.copy() | ||
85 | stride, names, pt = self.model.stride, self.model.names, self.model.pt | ||
86 | imgsz = check_img_size(self.cfg.imgsz, s=stride) # check image size | ||
87 | # Dataloader | ||
88 | bs = 1 # batch_size | ||
89 | # im = letterbox(image, imgsz, stride=stride, auto=True)[0] # padded resize | ||
90 | # hh, ww, cc = im.shape | ||
91 | # tlen1 = (640 - hh) // 2 | ||
92 | # tlen2 = 640 - hh - tlen1 | ||
93 | # t1 = np.zeros((tlen1, ww, cc)) | ||
94 | # t2 = np.zeros((tlen2, ww, cc)) | ||
95 | # im = np.vstack((t1, im, t2)) | ||
96 | im = keep_resize_padding(image) | ||
97 | |||
98 | # print(im.shape) | ||
99 | im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB | ||
100 | im = np.ascontiguousarray(im) # contiguous | ||
101 | # Run inference | ||
102 | self.model.warmup(imgsz=(1 if pt or self.model.triton else bs, 3, *imgsz)) # warmup | ||
103 | im = torch.from_numpy(im).to(self.model.device) | ||
104 | im = im.half() if self.model.fp16 else im.float() # uint8 to fp16/32 | ||
105 | im /= 255 # 0 - 255 to 0.0 - 1.0 | ||
106 | |||
107 | if len(im.shape) == 3: | ||
108 | im = im[None] # expand for batch dim | ||
109 | # Inference | ||
110 | pred = self.model(im, augment=False, visualize=False) | ||
111 | # print(pred[0].shape) | ||
112 | # exit(0) | ||
113 | # NMS | ||
114 | pred = non_max_suppression(pred, self.cfg.conf_thres, self.cfg.iou_thres, None, False, max_det=self.cfg.max_det) | ||
115 | det = pred[0] | ||
116 | # if len(det): | ||
117 | det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], image0.shape).round() | ||
118 | result = gen_result_dict(det.cpu().numpy().tolist()) | ||
119 | return result | ||
120 | |||
121 | def plot(self, image, boxes): | ||
122 | for box in boxes: | ||
123 | cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2) # pt1/pt2 corners; a single 4-tuple would be read as x,y,w,h | ||
124 | return image | ||
125 | |||
126 | |||
127 | if __name__ == "__main__": | ||
128 | img = cv2.imread( | ||
129 | '/data/situ_invoice_bill_data/qfs_train_val_data/train_data/authentic/gongshang/images/val/_1594890232.0110397page_11_img_0_name_au_gongshang.jpg') | ||
130 | detector = Yolov5(config) | ||
131 | result = detector.detect(img) | ||
132 | for i in result['result']: | ||
133 | position = list(i.values())[2:] | ||
134 | print(position) | ||
135 | cv2.rectangle(img, (position[0], position[1]), (position[0] + position[2], position[1] + position[3]), | ||
136 | (0, 0, 255)) | ||
137 | cv2.imshow('w', img) | ||
138 | cv2.waitKey(0) | ||
139 | print(result) |
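keep_resize_padding pads the shorter side with the 114-gray YOLOv5 uses for letterboxing, so the aspect ratio survives the final 640x640 resize. A quick sketch with a synthetic image, assuming inference.py's own imports resolve:

# Verify keep_resize_padding squares an image before resizing.
import numpy as np
from inference import keep_resize_padding

img = np.zeros((200, 100, 3), dtype=np.uint8)  # tall 200x100 dummy image
out = keep_resize_padding(img)
print(out.shape)  # (640, 640, 3): width padded 100 -> 200, then resized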
models/__init__.py
0 → 100644
File mode changed
models/common.py
0 → 100644
(diff collapsed; file contents not shown)
models/experimental.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | Experimental modules | ||
4 | """ | ||
5 | import math | ||
6 | |||
7 | import numpy as np | ||
8 | import torch | ||
9 | import torch.nn as nn | ||
10 | |||
11 | from utils.downloads import attempt_download | ||
12 | |||
13 | |||
14 | class Sum(nn.Module): | ||
15 | # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 | ||
16 | def __init__(self, n, weight=False): # n: number of inputs | ||
17 | super().__init__() | ||
18 | self.weight = weight # apply weights boolean | ||
19 | self.iter = range(n - 1) # iter object | ||
20 | if weight: | ||
21 | self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights | ||
22 | |||
23 | def forward(self, x): | ||
24 | y = x[0] # no weight | ||
25 | if self.weight: | ||
26 | w = torch.sigmoid(self.w) * 2 | ||
27 | for i in self.iter: | ||
28 | y = y + x[i + 1] * w[i] | ||
29 | else: | ||
30 | for i in self.iter: | ||
31 | y = y + x[i + 1] | ||
32 | return y | ||
33 | |||
34 | |||
35 | class MixConv2d(nn.Module): | ||
36 | # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 | ||
37 | def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy | ||
38 | super().__init__() | ||
39 | n = len(k) # number of convolutions | ||
40 | if equal_ch: # equal c_ per group | ||
41 | i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices | ||
42 | c_ = [(i == g).sum() for g in range(n)] # intermediate channels | ||
43 | else: # equal weight.numel() per group | ||
44 | b = [c2] + [0] * n | ||
45 | a = np.eye(n + 1, n, k=-1) | ||
46 | a -= np.roll(a, 1, axis=1) | ||
47 | a *= np.array(k) ** 2 | ||
48 | a[0] = 1 | ||
49 | c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b | ||
50 | |||
51 | self.m = nn.ModuleList([ | ||
52 | nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) | ||
53 | self.bn = nn.BatchNorm2d(c2) | ||
54 | self.act = nn.SiLU() | ||
55 | |||
56 | def forward(self, x): | ||
57 | return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) | ||
58 | |||
59 | |||
60 | class Ensemble(nn.ModuleList): | ||
61 | # Ensemble of models | ||
62 | def __init__(self): | ||
63 | super().__init__() | ||
64 | |||
65 | def forward(self, x, augment=False, profile=False, visualize=False): | ||
66 | y = [module(x, augment, profile, visualize)[0] for module in self] | ||
67 | # y = torch.stack(y).max(0)[0] # max ensemble | ||
68 | # y = torch.stack(y).mean(0) # mean ensemble | ||
69 | y = torch.cat(y, 1) # nms ensemble | ||
70 | return y, None # inference, train output | ||
71 | |||
72 | |||
73 | def attempt_load(weights, device=None, inplace=True, fuse=True): | ||
74 | # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a | ||
75 | from models.yolo import Detect, Model | ||
76 | |||
77 | model = Ensemble() | ||
78 | for w in weights if isinstance(weights, list) else [weights]: | ||
79 | ckpt = torch.load(attempt_download(w), map_location='cpu') # load | ||
80 | ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model | ||
81 | |||
82 | # Model compatibility updates | ||
83 | if not hasattr(ckpt, 'stride'): | ||
84 | ckpt.stride = torch.tensor([32.]) | ||
85 | if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): | ||
86 | ckpt.names = dict(enumerate(ckpt.names)) # convert to dict | ||
87 | |||
88 | model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode | ||
89 | |||
90 | # Module compatibility updates | ||
91 | for m in model.modules(): | ||
92 | t = type(m) | ||
93 | if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): | ||
94 | m.inplace = inplace # torch 1.7.0 compatibility | ||
95 | if t is Detect and not isinstance(m.anchor_grid, list): | ||
96 | delattr(m, 'anchor_grid') | ||
97 | setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) | ||
98 | elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): | ||
99 | m.recompute_scale_factor = None # torch 1.11.0 compatibility | ||
100 | |||
101 | # Return model | ||
102 | if len(model) == 1: | ||
103 | return model[-1] | ||
104 | |||
105 | # Return detection ensemble | ||
106 | print(f'Ensemble created with {weights}\n') | ||
107 | for k in 'names', 'nc', 'yaml': | ||
108 | setattr(model, k, getattr(model[0], k)) | ||
109 | model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride | ||
110 | assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' | ||
111 | return model |
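A quick shape check for MixConv2d, which splits the output channels across the kernel sizes in k; a minimal sketch with an arbitrary toy tensor:

# MixConv2d with c2=32 assigns 16 channels each to the 1x1 and 3x3 branches.
import torch
from models.experimental import MixConv2d

m = MixConv2d(c1=16, c2=32, k=(1, 3), s=1)
x = torch.randn(1, 16, 64, 64)
print(m(x).shape)  # torch.Size([1, 32, 64, 64]); stride 1 and k//2 padding keep H and W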
models/hub/anchors.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | # Default anchors for COCO data | ||
3 | |||
4 | |||
5 | # P5 ------------------------------------------------------------------------------------------------------------------- | ||
6 | # P5-640: | ||
7 | anchors_p5_640: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | |||
13 | # P6 ------------------------------------------------------------------------------------------------------------------- | ||
14 | # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 | ||
15 | anchors_p6_640: | ||
16 | - [9,11, 21,19, 17,41] # P3/8 | ||
17 | - [43,32, 39,70, 86,64] # P4/16 | ||
18 | - [65,131, 134,130, 120,265] # P5/32 | ||
19 | - [282,180, 247,354, 512,387] # P6/64 | ||
20 | |||
21 | # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 | ||
22 | anchors_p6_1280: | ||
23 | - [19,27, 44,40, 38,94] # P3/8 | ||
24 | - [96,68, 86,152, 180,137] # P4/16 | ||
25 | - [140,301, 303,264, 238,542] # P5/32 | ||
26 | - [436,615, 739,380, 925,792] # P6/64 | ||
27 | |||
28 | # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 | ||
29 | anchors_p6_1920: | ||
30 | - [28,41, 67,59, 57,141] # P3/8 | ||
31 | - [144,103, 129,227, 270,205] # P4/16 | ||
32 | - [209,452, 455,396, 358,812] # P5/32 | ||
33 | - [653,922, 1109,570, 1387,1187] # P6/64 | ||
34 | |||
35 | |||
36 | # P7 ------------------------------------------------------------------------------------------------------------------- | ||
37 | # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 | ||
38 | anchors_p7_640: | ||
39 | - [11,11, 13,30, 29,20] # P3/8 | ||
40 | - [30,46, 61,38, 39,92] # P4/16 | ||
41 | - [78,80, 146,66, 79,163] # P5/32 | ||
42 | - [149,150, 321,143, 157,303] # P6/64 | ||
43 | - [257,402, 359,290, 524,372] # P7/128 | ||
44 | |||
45 | # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 | ||
46 | anchors_p7_1280: | ||
47 | - [19,22, 54,36, 32,77] # P3/8 | ||
48 | - [70,83, 138,71, 75,173] # P4/16 | ||
49 | - [165,159, 148,334, 375,151] # P5/32 | ||
50 | - [334,317, 251,626, 499,474] # P6/64 | ||
51 | - [750,326, 534,814, 1079,818] # P7/128 | ||
52 | |||
53 | # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 | ||
54 | anchors_p7_1920: | ||
55 | - [29,34, 81,55, 47,115] # P3/8 | ||
56 | - [105,124, 207,107, 113,259] # P4/16 | ||
57 | - [247,238, 222,500, 563,227] # P5/32 | ||
58 | - [501,476, 376,939, 749,711] # P6/64 | ||
59 | - [1126,489, 801,1222, 1618,1227] # P7/128 |
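Each anchor pair is a (width, height) in input pixels at the stated image size; dividing by the stride of its P-level gives the size in grid cells, which is how Detect stores them internally. A small sketch for the P5-640 set:

# Express the P5-640 anchors in grid-cell units at each stride.
anchors_p5_640 = {8: [(10, 13), (16, 30), (33, 23)],        # P3/8
                  16: [(30, 61), (62, 45), (59, 119)],      # P4/16
                  32: [(116, 90), (156, 198), (373, 326)]}  # P5/32

for stride, level in anchors_p5_640.items():
    cells = [(round(w / stride, 2), round(h / stride, 2)) for w, h in level]
    print(f'stride {stride}: {cells}')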
models/hub/yolov3-spp.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # darknet53 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [32, 3, 1]], # 0 | ||
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 | ||
17 | [-1, 1, Bottleneck, [64]], | ||
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 | ||
19 | [-1, 2, Bottleneck, [128]], | ||
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 | ||
21 | [-1, 8, Bottleneck, [256]], | ||
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 | ||
23 | [-1, 8, Bottleneck, [512]], | ||
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 | ||
25 | [-1, 4, Bottleneck, [1024]], # 10 | ||
26 | ] | ||
27 | |||
28 | # YOLOv3-SPP head | ||
29 | head: | ||
30 | [[-1, 1, Bottleneck, [1024, False]], | ||
31 | [-1, 1, SPP, [512, [5, 9, 13]]], | ||
32 | [-1, 1, Conv, [1024, 3, 1]], | ||
33 | [-1, 1, Conv, [512, 1, 1]], | ||
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) | ||
35 | |||
36 | [-2, 1, Conv, [256, 1, 1]], | ||
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 | ||
39 | [-1, 1, Bottleneck, [512, False]], | ||
40 | [-1, 1, Bottleneck, [512, False]], | ||
41 | [-1, 1, Conv, [256, 1, 1]], | ||
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) | ||
43 | |||
44 | [-2, 1, Conv, [128, 1, 1]], | ||
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 | ||
47 | [-1, 1, Bottleneck, [256, False]], | ||
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) | ||
49 | |||
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
51 | ] |
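These model YAMLs are consumed by models/yolo.py, which builds the layer list from the backbone and head tables. A minimal sketch, assuming execution from the repository root:

# Instantiate a detection model directly from one of these configs.
import torch
from models.yolo import DetectionModel

model = DetectionModel('models/hub/yolov3-spp.yaml', ch=3, nc=80)
y = model(torch.zeros(1, 3, 640, 640))  # train-mode forward returns per-level raw outputs
print(len(y))  # 3 detection levels (P3, P4, P5)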
models/hub/yolov3-tiny.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,14, 23,27, 37,58] # P4/16 | ||
9 | - [81,82, 135,169, 344,319] # P5/32 | ||
10 | |||
11 | # YOLOv3-tiny backbone | ||
12 | backbone: | ||
13 | # [from, number, module, args] | ||
14 | [[-1, 1, Conv, [16, 3, 1]], # 0 | ||
15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 | ||
16 | [-1, 1, Conv, [32, 3, 1]], | ||
17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 | ||
18 | [-1, 1, Conv, [64, 3, 1]], | ||
19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 | ||
20 | [-1, 1, Conv, [128, 3, 1]], | ||
21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 | ||
22 | [-1, 1, Conv, [256, 3, 1]], | ||
23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 | ||
24 | [-1, 1, Conv, [512, 3, 1]], | ||
25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 | ||
26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 | ||
27 | ] | ||
28 | |||
29 | # YOLOv3-tiny head | ||
30 | head: | ||
31 | [[-1, 1, Conv, [1024, 3, 1]], | ||
32 | [-1, 1, Conv, [256, 1, 1]], | ||
33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) | ||
34 | |||
35 | [-2, 1, Conv, [128, 1, 1]], | ||
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 | ||
38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) | ||
39 | |||
40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) | ||
41 | ] |
models/hub/yolov3.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # darknet53 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [32, 3, 1]], # 0 | ||
16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 | ||
17 | [-1, 1, Bottleneck, [64]], | ||
18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 | ||
19 | [-1, 2, Bottleneck, [128]], | ||
20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 | ||
21 | [-1, 8, Bottleneck, [256]], | ||
22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 | ||
23 | [-1, 8, Bottleneck, [512]], | ||
24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 | ||
25 | [-1, 4, Bottleneck, [1024]], # 10 | ||
26 | ] | ||
27 | |||
28 | # YOLOv3 head | ||
29 | head: | ||
30 | [[-1, 1, Bottleneck, [1024, False]], | ||
31 | [-1, 1, Conv, [512, 1, 1]], | ||
32 | [-1, 1, Conv, [1024, 3, 1]], | ||
33 | [-1, 1, Conv, [512, 1, 1]], | ||
34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) | ||
35 | |||
36 | [-2, 1, Conv, [256, 1, 1]], | ||
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 | ||
39 | [-1, 1, Bottleneck, [512, False]], | ||
40 | [-1, 1, Bottleneck, [512, False]], | ||
41 | [-1, 1, Conv, [256, 1, 1]], | ||
42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) | ||
43 | |||
44 | [-2, 1, Conv, [128, 1, 1]], | ||
45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 | ||
47 | [-1, 1, Bottleneck, [256, False]], | ||
48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) | ||
49 | |||
50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
51 | ] |
models/hub/yolov5-bifpn.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 BiFPN head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/hub/yolov5-fpn.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 FPN head | ||
28 | head: | ||
29 | [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) | ||
30 | |||
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
33 | [-1, 1, Conv, [512, 1, 1]], | ||
34 | [-1, 3, C3, [512, False]], # 14 (P4/16-medium) | ||
35 | |||
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
38 | [-1, 1, Conv, [256, 1, 1]], | ||
39 | [-1, 3, C3, [256, False]], # 18 (P3/8-small) | ||
40 | |||
41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
42 | ] |
models/hub/yolov5-p2.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer | ||
8 | |||
9 | # YOLOv5 v6.0 backbone | ||
10 | backbone: | ||
11 | # [from, number, module, args] | ||
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
14 | [-1, 3, C3, [128]], | ||
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
16 | [-1, 6, C3, [256]], | ||
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
18 | [-1, 9, C3, [512]], | ||
19 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
20 | [-1, 3, C3, [1024]], | ||
21 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
22 | ] | ||
23 | |||
24 | # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs | ||
25 | head: | ||
26 | [[-1, 1, Conv, [512, 1, 1]], | ||
27 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
28 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
29 | [-1, 3, C3, [512, False]], # 13 | ||
30 | |||
31 | [-1, 1, Conv, [256, 1, 1]], | ||
32 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
33 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
34 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
35 | |||
36 | [-1, 1, Conv, [128, 1, 1]], | ||
37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
38 | [[-1, 2], 1, Concat, [1]], # cat backbone P2 | ||
39 | [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) | ||
40 | |||
41 | [-1, 1, Conv, [128, 3, 2]], | ||
42 | [[-1, 18], 1, Concat, [1]], # cat head P3 | ||
43 | [-1, 3, C3, [256, False]], # 24 (P3/8-small) | ||
44 | |||
45 | [-1, 1, Conv, [256, 3, 2]], | ||
46 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
47 | [-1, 3, C3, [512, False]], # 27 (P4/16-medium) | ||
48 | |||
49 | [-1, 1, Conv, [512, 3, 2]], | ||
50 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
51 | [-1, 3, C3, [1024, False]], # 30 (P5/32-large) | ||
52 | |||
53 | [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) | ||
54 | ] |
models/hub/yolov5-p34.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.50 # layer channel multiple | ||
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer | ||
8 | |||
9 | # YOLOv5 v6.0 backbone | ||
10 | backbone: | ||
11 | # [from, number, module, args] | ||
12 | [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 | ||
13 | [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 | ||
14 | [ -1, 3, C3, [ 128 ] ], | ||
15 | [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 | ||
16 | [ -1, 6, C3, [ 256 ] ], | ||
17 | [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 | ||
18 | [ -1, 9, C3, [ 512 ] ], | ||
19 | [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 | ||
20 | [ -1, 3, C3, [ 1024 ] ], | ||
21 | [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 | ||
22 | ] | ||
23 | |||
24 | # YOLOv5 v6.0 head with (P3, P4) outputs | ||
25 | head: | ||
26 | [ [ -1, 1, Conv, [ 512, 1, 1 ] ], | ||
27 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], | ||
28 | [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 | ||
29 | [ -1, 3, C3, [ 512, False ] ], # 13 | ||
30 | |||
31 | [ -1, 1, Conv, [ 256, 1, 1 ] ], | ||
32 | [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], | ||
33 | [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 | ||
34 | [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) | ||
35 | |||
36 | [ -1, 1, Conv, [ 256, 3, 2 ] ], | ||
37 | [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 | ||
38 | [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) | ||
39 | |||
40 | [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) | ||
41 | ] |
models/hub/yolov5-p6.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer | ||
8 | |||
9 | # YOLOv5 v6.0 backbone | ||
10 | backbone: | ||
11 | # [from, number, module, args] | ||
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
14 | [-1, 3, C3, [128]], | ||
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
16 | [-1, 6, C3, [256]], | ||
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
18 | [-1, 9, C3, [512]], | ||
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
20 | [-1, 3, C3, [768]], | ||
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
22 | [-1, 3, C3, [1024]], | ||
23 | [-1, 1, SPPF, [1024, 5]], # 11 | ||
24 | ] | ||
25 | |||
26 | # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs | ||
27 | head: | ||
28 | [[-1, 1, Conv, [768, 1, 1]], | ||
29 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
30 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
31 | [-1, 3, C3, [768, False]], # 15 | ||
32 | |||
33 | [-1, 1, Conv, [512, 1, 1]], | ||
34 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
35 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
36 | [-1, 3, C3, [512, False]], # 19 | ||
37 | |||
38 | [-1, 1, Conv, [256, 1, 1]], | ||
39 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
40 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
41 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) | ||
42 | |||
43 | [-1, 1, Conv, [256, 3, 2]], | ||
44 | [[-1, 20], 1, Concat, [1]], # cat head P4 | ||
45 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) | ||
46 | |||
47 | [-1, 1, Conv, [512, 3, 2]], | ||
48 | [[-1, 16], 1, Concat, [1]], # cat head P5 | ||
49 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) | ||
50 | |||
51 | [-1, 1, Conv, [768, 3, 2]], | ||
52 | [[-1, 12], 1, Concat, [1]], # cat head P6 | ||
53 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) | ||
54 | |||
55 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) | ||
56 | ] |
models/hub/yolov5-p7.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: 3 # AutoAnchor evolves 3 anchors per P output layer | ||
8 | |||
9 | # YOLOv5 v6.0 backbone | ||
10 | backbone: | ||
11 | # [from, number, module, args] | ||
12 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
13 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
14 | [-1, 3, C3, [128]], | ||
15 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
16 | [-1, 6, C3, [256]], | ||
17 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
18 | [-1, 9, C3, [512]], | ||
19 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
20 | [-1, 3, C3, [768]], | ||
21 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
22 | [-1, 3, C3, [1024]], | ||
23 | [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 | ||
24 | [-1, 3, C3, [1280]], | ||
25 | [-1, 1, SPPF, [1280, 5]], # 13 | ||
26 | ] | ||
27 | |||
28 | # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs | ||
29 | head: | ||
30 | [[-1, 1, Conv, [1024, 1, 1]], | ||
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
32 | [[-1, 10], 1, Concat, [1]], # cat backbone P6 | ||
33 | [-1, 3, C3, [1024, False]], # 17 | ||
34 | |||
35 | [-1, 1, Conv, [768, 1, 1]], | ||
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
37 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
38 | [-1, 3, C3, [768, False]], # 21 | ||
39 | |||
40 | [-1, 1, Conv, [512, 1, 1]], | ||
41 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
42 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
43 | [-1, 3, C3, [512, False]], # 25 | ||
44 | |||
45 | [-1, 1, Conv, [256, 1, 1]], | ||
46 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
47 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
48 | [-1, 3, C3, [256, False]], # 29 (P3/8-small) | ||
49 | |||
50 | [-1, 1, Conv, [256, 3, 2]], | ||
51 | [[-1, 26], 1, Concat, [1]], # cat head P4 | ||
52 | [-1, 3, C3, [512, False]], # 32 (P4/16-medium) | ||
53 | |||
54 | [-1, 1, Conv, [512, 3, 2]], | ||
55 | [[-1, 22], 1, Concat, [1]], # cat head P5 | ||
56 | [-1, 3, C3, [768, False]], # 35 (P5/32-large) | ||
57 | |||
58 | [-1, 1, Conv, [768, 3, 2]], | ||
59 | [[-1, 18], 1, Concat, [1]], # cat head P6 | ||
60 | [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) | ||
61 | |||
62 | [-1, 1, Conv, [1024, 3, 2]], | ||
63 | [[-1, 14], 1, Concat, [1]], # cat head P7 | ||
64 | [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) | ||
65 | |||
66 | [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) | ||
67 | ] |
models/hub/yolov5-panet.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 PANet head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/hub/yolov5l6.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [19,27, 44,40, 38,94] # P3/8 | ||
9 | - [96,68, 86,152, 180,137] # P4/16 | ||
10 | - [140,301, 303,264, 238,542] # P5/32 | ||
11 | - [436,615, 739,380, 925,792] # P6/64 | ||
12 | |||
13 | # YOLOv5 v6.0 backbone | ||
14 | backbone: | ||
15 | # [from, number, module, args] | ||
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
18 | [-1, 3, C3, [128]], | ||
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
20 | [-1, 6, C3, [256]], | ||
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
22 | [-1, 9, C3, [512]], | ||
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
24 | [-1, 3, C3, [768]], | ||
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
26 | [-1, 3, C3, [1024]], | ||
27 | [-1, 1, SPPF, [1024, 5]], # 11 | ||
28 | ] | ||
29 | |||
30 | # YOLOv5 v6.0 head | ||
31 | head: | ||
32 | [[-1, 1, Conv, [768, 1, 1]], | ||
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
35 | [-1, 3, C3, [768, False]], # 15 | ||
36 | |||
37 | [-1, 1, Conv, [512, 1, 1]], | ||
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
40 | [-1, 3, C3, [512, False]], # 19 | ||
41 | |||
42 | [-1, 1, Conv, [256, 1, 1]], | ||
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) | ||
46 | |||
47 | [-1, 1, Conv, [256, 3, 2]], | ||
48 | [[-1, 20], 1, Concat, [1]], # cat head P4 | ||
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) | ||
50 | |||
51 | [-1, 1, Conv, [512, 3, 2]], | ||
52 | [[-1, 16], 1, Concat, [1]], # cat head P5 | ||
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) | ||
54 | |||
55 | [-1, 1, Conv, [768, 3, 2]], | ||
56 | [[-1, 12], 1, Concat, [1]], # cat head P6 | ||
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) | ||
58 | |||
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) | ||
60 | ] |
models/hub/yolov5m6.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.67 # model depth multiple | ||
6 | width_multiple: 0.75 # layer channel multiple | ||
7 | anchors: | ||
8 | - [19,27, 44,40, 38,94] # P3/8 | ||
9 | - [96,68, 86,152, 180,137] # P4/16 | ||
10 | - [140,301, 303,264, 238,542] # P5/32 | ||
11 | - [436,615, 739,380, 925,792] # P6/64 | ||
12 | |||
13 | # YOLOv5 v6.0 backbone | ||
14 | backbone: | ||
15 | # [from, number, module, args] | ||
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
18 | [-1, 3, C3, [128]], | ||
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
20 | [-1, 6, C3, [256]], | ||
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
22 | [-1, 9, C3, [512]], | ||
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
24 | [-1, 3, C3, [768]], | ||
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
26 | [-1, 3, C3, [1024]], | ||
27 | [-1, 1, SPPF, [1024, 5]], # 11 | ||
28 | ] | ||
29 | |||
30 | # YOLOv5 v6.0 head | ||
31 | head: | ||
32 | [[-1, 1, Conv, [768, 1, 1]], | ||
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
35 | [-1, 3, C3, [768, False]], # 15 | ||
36 | |||
37 | [-1, 1, Conv, [512, 1, 1]], | ||
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
40 | [-1, 3, C3, [512, False]], # 19 | ||
41 | |||
42 | [-1, 1, Conv, [256, 1, 1]], | ||
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) | ||
46 | |||
47 | [-1, 1, Conv, [256, 3, 2]], | ||
48 | [[-1, 20], 1, Concat, [1]], # cat head P4 | ||
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) | ||
50 | |||
51 | [-1, 1, Conv, [512, 3, 2]], | ||
52 | [[-1, 16], 1, Concat, [1]], # cat head P5 | ||
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) | ||
54 | |||
55 | [-1, 1, Conv, [768, 3, 2]], | ||
56 | [[-1, 12], 1, Concat, [1]], # cat head P6 | ||
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) | ||
58 | |||
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) | ||
60 | ] |
models/hub/yolov5n6.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.25 # layer channel multiple | ||
7 | anchors: | ||
8 | - [19,27, 44,40, 38,94] # P3/8 | ||
9 | - [96,68, 86,152, 180,137] # P4/16 | ||
10 | - [140,301, 303,264, 238,542] # P5/32 | ||
11 | - [436,615, 739,380, 925,792] # P6/64 | ||
12 | |||
13 | # YOLOv5 v6.0 backbone | ||
14 | backbone: | ||
15 | # [from, number, module, args] | ||
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
18 | [-1, 3, C3, [128]], | ||
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
20 | [-1, 6, C3, [256]], | ||
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
22 | [-1, 9, C3, [512]], | ||
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
24 | [-1, 3, C3, [768]], | ||
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
26 | [-1, 3, C3, [1024]], | ||
27 | [-1, 1, SPPF, [1024, 5]], # 11 | ||
28 | ] | ||
29 | |||
30 | # YOLOv5 v6.0 head | ||
31 | head: | ||
32 | [[-1, 1, Conv, [768, 1, 1]], | ||
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
35 | [-1, 3, C3, [768, False]], # 15 | ||
36 | |||
37 | [-1, 1, Conv, [512, 1, 1]], | ||
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
40 | [-1, 3, C3, [512, False]], # 19 | ||
41 | |||
42 | [-1, 1, Conv, [256, 1, 1]], | ||
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) | ||
46 | |||
47 | [-1, 1, Conv, [256, 3, 2]], | ||
48 | [[-1, 20], 1, Concat, [1]], # cat head P4 | ||
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) | ||
50 | |||
51 | [-1, 1, Conv, [512, 3, 2]], | ||
52 | [[-1, 16], 1, Concat, [1]], # cat head P5 | ||
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) | ||
54 | |||
55 | [-1, 1, Conv, [768, 3, 2]], | ||
56 | [[-1, 12], 1, Concat, [1]], # cat head P6 | ||
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) | ||
58 | |||
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) | ||
60 | ] |
models/hub/yolov5s-LeakyReLU.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model | ||
6 | depth_multiple: 0.33 # model depth multiple | ||
7 | width_multiple: 0.50 # layer channel multiple | ||
8 | anchors: | ||
9 | - [10,13, 16,30, 33,23] # P3/8 | ||
10 | - [30,61, 62,45, 59,119] # P4/16 | ||
11 | - [116,90, 156,198, 373,326] # P5/32 | ||
12 | |||
13 | # YOLOv5 v6.0 backbone | ||
14 | backbone: | ||
15 | # [from, number, module, args] | ||
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
18 | [-1, 3, C3, [128]], | ||
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
20 | [-1, 6, C3, [256]], | ||
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
22 | [-1, 9, C3, [512]], | ||
23 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
24 | [-1, 3, C3, [1024]], | ||
25 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
26 | ] | ||
27 | |||
28 | # YOLOv5 v6.0 head | ||
29 | head: | ||
30 | [[-1, 1, Conv, [512, 1, 1]], | ||
31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
33 | [-1, 3, C3, [512, False]], # 13 | ||
34 | |||
35 | [-1, 1, Conv, [256, 1, 1]], | ||
36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
38 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
39 | |||
40 | [-1, 1, Conv, [256, 3, 2]], | ||
41 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
42 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
43 | |||
44 | [-1, 1, Conv, [512, 3, 2]], | ||
45 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
46 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
47 | |||
48 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
49 | ] |
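
The `activation:` key above is the only difference from the stock yolov5s.yaml: it swaps the default SiLU used by every Conv block for nn.LeakyReLU(0.1). In the Ultralytics parser this string is eval'd once and installed as the Conv default; a hedged sketch of the mechanism (the exact attribute name comes from upstream models/common.py and is an assumption here, since that file is not part of this excerpt):

import torch.nn as nn

act = "nn.LeakyReLU(0.1)"   # the string exactly as it appears in the YAML
activation = eval(act)      # -> LeakyReLU(negative_slope=0.1)
assert isinstance(activation, nn.Module)
# upstream then does roughly: Conv.default_act = activation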
models/hub/yolov5s-ghost.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.50 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3Ghost, [128]], | ||
18 | [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3Ghost, [256]], | ||
20 | [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3Ghost, [512]], | ||
22 | [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3Ghost, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, GhostConv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3Ghost, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, GhostConv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, GhostConv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, GhostConv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
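
GhostConv/C3Ghost trade a full convolution for a cheap two-step one: half the output channels come from a regular conv, the other half from a depthwise conv applied to that result. A simplified, self-contained sketch (the real GhostConv in models/common.py also wraps each conv in BN plus an activation):

import torch
import torch.nn as nn

class GhostConvSketch(nn.Module):
    def __init__(self, c1, c2, k=1, s=1):
        super().__init__()
        c_ = c2 // 2
        self.primary = nn.Conv2d(c1, c_, k, s, k // 2, bias=False)      # regular conv
        self.cheap = nn.Conv2d(c_, c_, 5, 1, 2, groups=c_, bias=False)  # depthwise 'ghost' features
    def forward(self, x):
        y = self.primary(x)
        return torch.cat((y, self.cheap(y)), 1)                         # c2 channels total

print(GhostConvSketch(64, 128)(torch.randn(1, 64, 32, 32)).shape)  # torch.Size([1, 128, 32, 32])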
models/hub/yolov5s-transformer.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.50 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3TR, [1024]], # 8 <--- C3TR() Transformer module | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/hub/yolov5s6.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.50 # layer channel multiple | ||
7 | anchors: | ||
8 | - [19,27, 44,40, 38,94] # P3/8 | ||
9 | - [96,68, 86,152, 180,137] # P4/16 | ||
10 | - [140,301, 303,264, 238,542] # P5/32 | ||
11 | - [436,615, 739,380, 925,792] # P6/64 | ||
12 | |||
13 | # YOLOv5 v6.0 backbone | ||
14 | backbone: | ||
15 | # [from, number, module, args] | ||
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
18 | [-1, 3, C3, [128]], | ||
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
20 | [-1, 6, C3, [256]], | ||
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
22 | [-1, 9, C3, [512]], | ||
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
24 | [-1, 3, C3, [768]], | ||
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
26 | [-1, 3, C3, [1024]], | ||
27 | [-1, 1, SPPF, [1024, 5]], # 11 | ||
28 | ] | ||
29 | |||
30 | # YOLOv5 v6.0 head | ||
31 | head: | ||
32 | [[-1, 1, Conv, [768, 1, 1]], | ||
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
35 | [-1, 3, C3, [768, False]], # 15 | ||
36 | |||
37 | [-1, 1, Conv, [512, 1, 1]], | ||
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
40 | [-1, 3, C3, [512, False]], # 19 | ||
41 | |||
42 | [-1, 1, Conv, [256, 1, 1]], | ||
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) | ||
46 | |||
47 | [-1, 1, Conv, [256, 3, 2]], | ||
48 | [[-1, 20], 1, Concat, [1]], # cat head P4 | ||
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) | ||
50 | |||
51 | [-1, 1, Conv, [512, 3, 2]], | ||
52 | [[-1, 16], 1, Concat, [1]], # cat head P5 | ||
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) | ||
54 | |||
55 | [-1, 1, Conv, [768, 3, 2]], | ||
56 | [[-1, 12], 1, Concat, [1]], # cat head P6 | ||
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) | ||
58 | |||
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) | ||
60 | ] |
models/hub/yolov5x6.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.33 # model depth multiple | ||
6 | width_multiple: 1.25 # layer channel multiple | ||
7 | anchors: | ||
8 | - [19,27, 44,40, 38,94] # P3/8 | ||
9 | - [96,68, 86,152, 180,137] # P4/16 | ||
10 | - [140,301, 303,264, 238,542] # P5/32 | ||
11 | - [436,615, 739,380, 925,792] # P6/64 | ||
12 | |||
13 | # YOLOv5 v6.0 backbone | ||
14 | backbone: | ||
15 | # [from, number, module, args] | ||
16 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
17 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
18 | [-1, 3, C3, [128]], | ||
19 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
20 | [-1, 6, C3, [256]], | ||
21 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
22 | [-1, 9, C3, [512]], | ||
23 | [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 | ||
24 | [-1, 3, C3, [768]], | ||
25 | [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 | ||
26 | [-1, 3, C3, [1024]], | ||
27 | [-1, 1, SPPF, [1024, 5]], # 11 | ||
28 | ] | ||
29 | |||
30 | # YOLOv5 v6.0 head | ||
31 | head: | ||
32 | [[-1, 1, Conv, [768, 1, 1]], | ||
33 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
34 | [[-1, 8], 1, Concat, [1]], # cat backbone P5 | ||
35 | [-1, 3, C3, [768, False]], # 15 | ||
36 | |||
37 | [-1, 1, Conv, [512, 1, 1]], | ||
38 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
39 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
40 | [-1, 3, C3, [512, False]], # 19 | ||
41 | |||
42 | [-1, 1, Conv, [256, 1, 1]], | ||
43 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
44 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
45 | [-1, 3, C3, [256, False]], # 23 (P3/8-small) | ||
46 | |||
47 | [-1, 1, Conv, [256, 3, 2]], | ||
48 | [[-1, 20], 1, Concat, [1]], # cat head P4 | ||
49 | [-1, 3, C3, [512, False]], # 26 (P4/16-medium) | ||
50 | |||
51 | [-1, 1, Conv, [512, 3, 2]], | ||
52 | [[-1, 16], 1, Concat, [1]], # cat head P5 | ||
53 | [-1, 3, C3, [768, False]], # 29 (P5/32-large) | ||
54 | |||
55 | [-1, 1, Conv, [768, 3, 2]], | ||
56 | [[-1, 12], 1, Concat, [1]], # cat head P6 | ||
57 | [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) | ||
58 | |||
59 | [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) | ||
60 | ] |
models/segment/yolov5l-seg.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5) | ||
48 | ] |
models/segment/yolov5m-seg.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.67 # model depth multiple | ||
6 | width_multiple: 0.75 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5) | ||
48 | ] | ||
\ No newline at end of file
models/segment/yolov5n-seg.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.25 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5) | ||
48 | ] |
models/segment/yolov5s-seg.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.5 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5) | ||
48 | ] | ||
\ No newline at end of file
models/segment/yolov5x-seg.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.33 # model depth multiple | ||
6 | width_multiple: 1.25 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5) | ||
48 | ] |
models/tf.py
0 → 100644
This diff is collapsed.
models/yolo.py
0 → 100644
This diff is collapsed.
models/yolov5_config.py
0 → 100644
1 | from easydict import EasyDict as edict | ||
2 | |||
3 | config = edict( | ||
4 | # weights='/home/situ/qfs/invoice_tamper/09_project/project/yolov5_inference/runs/exp2/weights/best.pt', # model path or triton URL | ||
5 | weights='runs/train/exp/weights/best.onnx', # model path or triton URL | ||
6 | data='data/VOC.yaml', # dataset.yaml path | ||
7 | imgsz=(640, 640), # inference size (height, width) | ||
8 | conf_thres=0.2, # confidence threshold | ||
9 | iou_thres=0.45, # NMS IOU threshold | ||
10 | max_det=1000, # maximum detections per image | ||
11 | device='' # cuda device, i.e. 0 or 0,1,2,3 or cpu | ||
12 | ) |
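
Because EasyDict exposes keys as attributes, downstream code can read and override these settings with dot access. A usage sketch (inference.py is not shown in this excerpt, so the Yolov5 constructor signature below is inferred from pipeline.py's usage):

from models.yolov5_config import config
from inference import Yolov5

config.conf_thres = 0.25   # attribute-style override, same object as config['conf_thres']
detector = Yolov5(config)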
models/yolov5l.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.0 # model depth multiple | ||
6 | width_multiple: 1.0 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/yolov5m.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.67 # model depth multiple | ||
6 | width_multiple: 0.75 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/yolov5n.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.25 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/yolov5s.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 0.33 # model depth multiple | ||
6 | width_multiple: 0.50 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
models/yolov5x.yaml
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | |||
3 | # Parameters | ||
4 | nc: 80 # number of classes | ||
5 | depth_multiple: 1.33 # model depth multiple | ||
6 | width_multiple: 1.25 # layer channel multiple | ||
7 | anchors: | ||
8 | - [10,13, 16,30, 33,23] # P3/8 | ||
9 | - [30,61, 62,45, 59,119] # P4/16 | ||
10 | - [116,90, 156,198, 373,326] # P5/32 | ||
11 | |||
12 | # YOLOv5 v6.0 backbone | ||
13 | backbone: | ||
14 | # [from, number, module, args] | ||
15 | [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 | ||
16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 | ||
17 | [-1, 3, C3, [128]], | ||
18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 | ||
19 | [-1, 6, C3, [256]], | ||
20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 | ||
21 | [-1, 9, C3, [512]], | ||
22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 | ||
23 | [-1, 3, C3, [1024]], | ||
24 | [-1, 1, SPPF, [1024, 5]], # 9 | ||
25 | ] | ||
26 | |||
27 | # YOLOv5 v6.0 head | ||
28 | head: | ||
29 | [[-1, 1, Conv, [512, 1, 1]], | ||
30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 | ||
32 | [-1, 3, C3, [512, False]], # 13 | ||
33 | |||
34 | [-1, 1, Conv, [256, 1, 1]], | ||
35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], | ||
36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 | ||
37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | ||
38 | |||
39 | [-1, 1, Conv, [256, 3, 2]], | ||
40 | [[-1, 14], 1, Concat, [1]], # cat head P4 | ||
41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | ||
42 | |||
43 | [-1, 1, Conv, [512, 3, 2]], | ||
44 | [[-1, 10], 1, Concat, [1]], # cat head P5 | ||
45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | ||
46 | |||
47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) | ||
48 | ] |
pipeline.py
0 → 100644
1 | import time | ||
2 | import cv2 | ||
3 | from bank_ocr_inference import bill_ocr, extract_bank_info | ||
4 | from inference import Yolov5 | ||
5 | from models.yolov5_config import config | ||
6 | |||
7 | |||
8 | def enlarge_position(box): | ||
9 | x1, y1, x2, y2 = box | ||
10 | w, h = abs(x2 - x1), abs(y2 - y1) | ||
11 | y1, y2 = max(y1 - h // 3, 0), y2 + h // 3 | ||
12 | x1, x2 = max(x1 - w // 8, 0), x2 + w // 8 | ||
13 | return [x1, y1, x2, y2] | ||
14 | |||
15 | |||
16 | def tamper_detect(image): | ||
17 | st = time.time() | ||
18 | ocr_results = bill_ocr(image) | ||
19 | et1 = time.time() | ||
20 | info_results = extract_bank_info(ocr_results) | ||
21 | et2 = time.time() | ||
22 | tamper_results = [] | ||
23 | if len(info_results) != 0: | ||
24 | for info_result in info_results: | ||
25 | box = [info_result[1][0], info_result[1][1], info_result[1][4], info_result[1][5]] | ||
26 | x1, y1, x2, y2 = enlarge_position(box) | ||
27 | # x1, y1, x2, y2 = box | ||
28 | info_image = image[y1:y2, x1:x2, :] | ||
29 | # cv2.imshow('info_image', info_image) | ||
30 | results = detector.detect(info_image) # module-level Yolov5 detector, created in __main__ | ||
31 | print(results) | ||
32 | if len(results['result']) != 0: | ||
33 | for res in results['result']: | ||
34 | left = int(res['left']) | ||
35 | top = int(res['top']) | ||
36 | width = int(res['width']) | ||
37 | height = int(res['height']) | ||
38 | absolute_position = [x1 + left, y1 + top, x1 + left + width, y1 + top + height] | ||
39 | tamper_results.append(absolute_position) | ||
40 | print(tamper_results) | ||
41 | et3 = time.time() | ||
42 | |||
43 | print(f'all:{et3 - st} ocr:{et1 - st} extract:{et2 - et1} yolo:{et3 - et2}') | ||
44 | for i in tamper_results: | ||
45 | cv2.rectangle(image, tuple(i[:2]), tuple(i[2:]), (0, 0, 255), 2) | ||
46 | cv2.imshow('info', image) | ||
47 | cv2.waitKey(0) | ||
48 | |||
49 | |||
50 | if __name__ == '__main__': | ||
51 | detector = Yolov5(config) | ||
52 | image = cv2.imread( | ||
53 | "/home/situ/下载/农行/_1594626795.5104141page_18_img_0-.jpg") | ||
54 | tamper_detect(image) |
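
A worked example of the coordinate handling in tamper_detect(): enlarge_position() pads the OCR box by a third of its height vertically and an eighth of its width horizontally (clamped at zero), and each detection found inside the crop is shifted back by the crop origin (x1, y1) to obtain absolute image coordinates. The detection values below are hypothetical:

box = [100, 200, 260, 240]                # OCR box, w=160, h=40
x1, y1, x2, y2 = enlarge_position(box)    # -> [80, 187, 280, 253]
left, top, width, height = 10, 5, 30, 12  # hypothetical YOLO hit inside the crop
absolute = [x1 + left, y1 + top, x1 + left + width, y1 + top + height]
print(absolute)                           # [90, 192, 120, 204]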
plot_sourece_labels.py
0 → 100644
1 | import os | ||
2 | |||
3 | import cv2 | ||
4 | import numpy as np | ||
5 | import pandas as pd | ||
6 | import tqdm | ||
7 | |||
8 | |||
9 | def get_source_image_det(crop_position, predict_positions): | ||
10 | result = [] | ||
11 | x1, y1, x2, y2 = crop_position | ||
12 | for p in predict_positions: | ||
13 | px1, py1, px2, py2, score = p | ||
14 | w, h = px2 - px1, py2 - py1 | ||
15 | result.append([x1 + px1, y1 + py1, x1 + px1 + w, y1 + py1 + h, score]) | ||
16 | return result | ||
17 | |||
18 | |||
19 | def decode_label(image, label_path): | ||
20 | data = open(label_path).readlines() | ||
21 | h, w, c = image.shape | ||
22 | result = [] | ||
23 | for d in data: | ||
24 | d = [float(i) for i in d.strip().split(' ')] | ||
25 | cls, cx, cy, cw, ch, score = d | ||
26 | cx, cy, cw, ch = cx * w, cy * h, cw * w, ch * h | ||
27 | result.append([int(cx - cw // 2), int(cy - ch // 2), int(cx + cw // 2), int(cy + ch // 2), score]) | ||
28 | return result | ||
29 | |||
30 | |||
31 | if __name__ == '__main__': | ||
32 | source_image_path = '/data/situ_invoice_bill_data/new_data/qfs_bank_bill_data/gongshang/authentic/images/val' | ||
33 | val_image_path = '/home/situ/qfs/invoice_tamper/09_project/project/tamper_det/data/images/crop_img' | ||
34 | predict_label_path = '/home/situ/qfs/invoice_tamper/09_project/project/tamper_det/runs/detect/exp/labels' | ||
35 | crop_csv_path = '/data/situ_invoice_bill_data/new_data/qfs_bank_bill_data/gongshang/croped_merge.csv' | ||
36 | predict_labels = os.listdir(predict_label_path) | ||
37 | source_images = os.listdir(source_image_path) | ||
38 | data = pd.read_csv(crop_csv_path) | ||
39 | img_name = data.loc[:, 'img_name'].tolist() | ||
40 | crop_position1 = data.loc[:, 'name_crop_coord'].tolist() | ||
41 | crop_position2 = data.loc[:, 'number_crop_coord'].tolist() | ||
42 | cc = '/data/situ_invoice_bill_data/new_data/qfs_bank_bill_data/gongshang/tampered/images/val/ps3' | ||
43 | for im in os.listdir(cc): | ||
44 | print(im) | ||
45 | img = cv2.imread(os.path.join(cc, im)) | ||
46 | img_ = img.copy() | ||
47 | id = img_name.index(im) | ||
48 | name_crop_position = [int(i) for i in crop_position1[id].split(',')] | ||
49 | number_crop_position = [int(i) for i in crop_position2[id].split(',')] | ||
50 | nx1, ny1, nx2, ny2 = name_crop_position | ||
51 | nux1, nuy1, nux2, nuy2 = number_crop_position | ||
52 | if im[:-4] + '_hname.txt' in predict_labels: | ||
53 | |||
54 | h, w, c = img[ny1:ny2, nx1:nx2, :].shape | ||
55 | data = open(os.path.join(predict_label_path, im[:-4] + '_hname.txt')).readlines() | ||
56 | for d in data: | ||
57 | cls, cx, cy, cw, ch, score = [float(i) for i in d.strip().split(' ')] | ||
58 | cx, cy, cw, ch = int(cx * w), int(cy * h), int(cw * w), int(ch * h) | ||
59 | cx1, cy1 = cx - cw // 2, cy - ch // 2 | ||
60 | x1, y1, x2, y2 = nx1 + cx1, ny1 + cy1, nx1 + cx1 + cw, ny1 + cy1 + ch | ||
61 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2) | ||
62 | cv2.putText(img, f'tampered:{score}', (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) | ||
63 | if im[:-4] + '_hnumber.txt' in predict_labels: | ||
64 | h, w, c = img[nuy1:nuy2, nux1:nux2, :].shape | ||
65 | data = open(os.path.join(predict_label_path, im[:-4] + '_hnumber.txt')).readlines() | ||
66 | for d in data: | ||
67 | cls, cx, cy, cw, ch, score = [float(i) for i in d.strip().split(' ')] | ||
68 | cx, cy, cw, ch = int(cx * w), int(cy * h), int(cw * w), int(ch * h) | ||
69 | cx1, cy1 = cx - cw // 2, cy - ch // 2 | ||
70 | x1, y1, x2, y2 = nux1 + cx1, nuy1 + cy1, nux1 + cx1 + cw, nuy1 + cy1 + ch | ||
71 | cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2) | ||
72 | cv2.putText(img, f'tampered:{score}', (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) | ||
73 | result = np.vstack((img_, img)) | ||
74 | cv2.imwrite(f'z/{im}', result) |
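
The label decoding repeated in both branches above follows the standard YOLO txt format: each row stores (cls, cx, cy, w, h[, score]) normalized to [0, 1], so pixel boxes are recovered by scaling with the crop size and converting center/size to corners. A small worked example with made-up numbers:

w, h = 640, 480                                  # crop size
cls, cx, cy, cw, ch, score = 0, 0.5, 0.5, 0.25, 0.125, 0.9
cx, cy, cw, ch = cx * w, cy * h, cw * w, ch * h  # 320, 240, 160, 60
box = [int(cx - cw // 2), int(cy - ch // 2), int(cx + cw // 2), int(cy + ch // 2)]
print(box)                                       # [240, 210, 400, 270]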
requirements.txt
0 → 100644
1 | # YOLOv5 requirements | ||
2 | # Usage: pip install -r requirements.txt | ||
3 | |||
4 | # Base ---------------------------------------- | ||
5 | matplotlib>=3.2.2 | ||
6 | numpy>=1.18.5 | ||
7 | opencv-python>=4.1.1 | ||
8 | Pillow>=7.1.2 | ||
9 | PyYAML>=5.3.1 | ||
10 | requests>=2.23.0 | ||
11 | scipy>=1.4.1 | ||
12 | torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended) | ||
13 | torchvision>=0.8.1 | ||
14 | tqdm>=4.64.0 | ||
15 | # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 | ||
16 | |||
17 | # Logging ------------------------------------- | ||
18 | tensorboard>=2.4.1 | ||
19 | # wandb | ||
20 | # clearml | ||
21 | |||
22 | # Plotting ------------------------------------ | ||
23 | pandas>=1.1.4 | ||
24 | seaborn>=0.11.0 | ||
25 | |||
26 | # Export -------------------------------------- | ||
27 | # coremltools>=6.0 # CoreML export | ||
28 | # onnx>=1.9.0 # ONNX export | ||
29 | # onnx-simplifier>=0.4.1 # ONNX simplifier | ||
30 | # nvidia-pyindex # TensorRT export | ||
31 | # nvidia-tensorrt # TensorRT export | ||
32 | # scikit-learn<=1.1.2 # CoreML quantization | ||
33 | # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) | ||
34 | # tensorflowjs>=3.9.0 # TF.js export | ||
35 | # openvino-dev # OpenVINO export | ||
36 | |||
37 | # Deploy -------------------------------------- | ||
38 | # tritonclient[all]~=2.24.0 | ||
39 | |||
40 | # Extras -------------------------------------- | ||
41 | ipython # interactive notebook | ||
42 | psutil # system utilization | ||
43 | thop>=0.1.1 # FLOPs computation | ||
44 | # mss # screenshots | ||
45 | # albumentations>=1.0.3 | ||
46 | # pycocotools>=2.0 # COCO mAP | ||
47 | # roboflow |
train.py
0 → 100644
This diff is collapsed.
utils/__init__.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | utils/initialization | ||
4 | """ | ||
5 | |||
6 | import contextlib | ||
7 | import platform | ||
8 | import threading | ||
9 | |||
10 | |||
11 | def emojis(str=''): | ||
12 | # Return platform-dependent emoji-safe version of string | ||
13 | return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str | ||
14 | |||
15 | |||
16 | class TryExcept(contextlib.ContextDecorator): | ||
17 | # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager | ||
18 | def __init__(self, msg=''): | ||
19 | self.msg = msg | ||
20 | |||
21 | def __enter__(self): | ||
22 | pass | ||
23 | |||
24 | def __exit__(self, exc_type, value, traceback): | ||
25 | if value: | ||
26 | print(emojis(f'{self.msg}{value}')) | ||
27 | return True | ||
28 | |||
29 | |||
30 | def threaded(func): | ||
31 | # Multi-threads a target function and returns thread. Usage: @threaded decorator | ||
32 | def wrapper(*args, **kwargs): | ||
33 | thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) | ||
34 | thread.start() | ||
35 | return thread | ||
36 | |||
37 | return wrapper | ||
38 | |||
39 | |||
40 | def notebook_init(verbose=True): | ||
41 | # Check system software and hardware | ||
42 | print('Checking setup...') | ||
43 | |||
44 | import os | ||
45 | import shutil | ||
46 | |||
47 | from utils.general import check_font, check_requirements, is_colab | ||
48 | from utils.torch_utils import select_device # imports | ||
49 | |||
50 | check_requirements(('psutil', 'IPython')) | ||
51 | check_font() | ||
52 | |||
53 | import psutil | ||
54 | from IPython import display # to display images and clear console output | ||
55 | |||
56 | if is_colab(): | ||
57 | shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory | ||
58 | |||
59 | # System info | ||
60 | if verbose: | ||
61 | gb = 1 << 30 # bytes to GiB (1024 ** 3) | ||
62 | ram = psutil.virtual_memory().total | ||
63 | total, used, free = shutil.disk_usage("/") | ||
64 | display.clear_output() | ||
65 | s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' | ||
66 | else: | ||
67 | s = '' | ||
68 | |||
69 | select_device(newline=False) | ||
70 | print(emojis(f'Setup complete ✅ {s}')) | ||
71 | return display |
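
Quick usage sketch for the two helpers above: TryExcept swallows exceptions and prints them with a prefix (its __exit__ returns True), while threaded fires the wrapped function on a daemon thread and hands back the Thread object:

from utils import TryExcept, threaded

@TryExcept('demo failed: ')
def may_fail():
    raise ValueError('boom')   # printed as 'demo failed: boom', not raised

@threaded
def background_job(n):
    print(n * 2)               # runs on a daemon thread

may_fail()
background_job(21).join()      # wrapper returns the started Thread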
utils/activations.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | Activation functions | ||
4 | """ | ||
5 | |||
6 | import torch | ||
7 | import torch.nn as nn | ||
8 | import torch.nn.functional as F | ||
9 | |||
10 | |||
11 | class SiLU(nn.Module): | ||
12 | # SiLU activation https://arxiv.org/pdf/1606.08415.pdf | ||
13 | @staticmethod | ||
14 | def forward(x): | ||
15 | return x * torch.sigmoid(x) | ||
16 | |||
17 | |||
18 | class Hardswish(nn.Module): | ||
19 | # Hard-SiLU activation | ||
20 | @staticmethod | ||
21 | def forward(x): | ||
22 | # return x * F.hardsigmoid(x) # for TorchScript and CoreML | ||
23 | return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX | ||
24 | |||
25 | |||
26 | class Mish(nn.Module): | ||
27 | # Mish activation https://github.com/digantamisra98/Mish | ||
28 | @staticmethod | ||
29 | def forward(x): | ||
30 | return x * F.softplus(x).tanh() | ||
31 | |||
32 | |||
33 | class MemoryEfficientMish(nn.Module): | ||
34 | # Mish activation memory-efficient | ||
35 | class F(torch.autograd.Function): | ||
36 | |||
37 | @staticmethod | ||
38 | def forward(ctx, x): | ||
39 | ctx.save_for_backward(x) | ||
40 | return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) | ||
41 | |||
42 | @staticmethod | ||
43 | def backward(ctx, grad_output): | ||
44 | x = ctx.saved_tensors[0] | ||
45 | sx = torch.sigmoid(x) | ||
46 | fx = F.softplus(x).tanh() | ||
47 | return grad_output * (fx + x * sx * (1 - fx * fx)) | ||
48 | |||
49 | def forward(self, x): | ||
50 | return self.F.apply(x) | ||
51 | |||
52 | |||
53 | class FReLU(nn.Module): | ||
54 | # FReLU activation https://arxiv.org/abs/2007.11824 | ||
55 | def __init__(self, c1, k=3): # ch_in, kernel | ||
56 | super().__init__() | ||
57 | self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) | ||
58 | self.bn = nn.BatchNorm2d(c1) | ||
59 | |||
60 | def forward(self, x): | ||
61 | return torch.max(x, self.bn(self.conv(x))) | ||
62 | |||
63 | |||
64 | class AconC(nn.Module): | ||
65 | r""" ACON activation (activate or not) | ||
66 | AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter | ||
67 | according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. | ||
68 | """ | ||
69 | |||
70 | def __init__(self, c1): | ||
71 | super().__init__() | ||
72 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) | ||
73 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) | ||
74 | self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) | ||
75 | |||
76 | def forward(self, x): | ||
77 | dpx = (self.p1 - self.p2) * x | ||
78 | return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x | ||
79 | |||
80 | |||
81 | class MetaAconC(nn.Module): | ||
82 | r""" ACON activation (activate or not) | ||
83 | MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network | ||
84 | according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. | ||
85 | """ | ||
86 | |||
87 | def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r | ||
88 | super().__init__() | ||
89 | c2 = max(r, c1 // r) | ||
90 | self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) | ||
91 | self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) | ||
92 | self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) | ||
93 | self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) | ||
94 | # self.bn1 = nn.BatchNorm2d(c2) | ||
95 | # self.bn2 = nn.BatchNorm2d(c1) | ||
96 | |||
97 | def forward(self, x): | ||
98 | y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) | ||
99 | # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 | ||
100 | # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable | ||
101 | beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed | ||
102 | dpx = (self.p1 - self.p2) * x | ||
103 | return dpx * torch.sigmoid(beta * dpx) + self.p2 * x |
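
The hand-rolled SiLU and Hardswish above exist for export compatibility, but they are numerically identical to the PyTorch built-ins, which a quick sanity check confirms:

import torch
import torch.nn as nn
from utils.activations import SiLU, Hardswish

x = torch.randn(8, 16)
assert torch.allclose(SiLU()(x), nn.SiLU()(x), atol=1e-6)
assert torch.allclose(Hardswish()(x), nn.Hardswish()(x), atol=1e-6)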
utils/augmentations.py
0 → 100644
This diff is collapsed.
utils/autoanchor.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | AutoAnchor utils | ||
4 | """ | ||
5 | |||
6 | import random | ||
7 | |||
8 | import numpy as np | ||
9 | import torch | ||
10 | import yaml | ||
11 | from tqdm import tqdm | ||
12 | |||
13 | from utils import TryExcept | ||
14 | from utils.general import LOGGER, colorstr | ||
15 | |||
16 | PREFIX = colorstr('AutoAnchor: ') | ||
17 | |||
18 | |||
19 | def check_anchor_order(m): | ||
20 | # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary | ||
21 | a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer | ||
22 | da = a[-1] - a[0] # delta a | ||
23 | ds = m.stride[-1] - m.stride[0] # delta s | ||
24 | if da and (da.sign() != ds.sign()): # order mismatch, reverse anchors | ||
25 | LOGGER.info(f'{PREFIX}Reversing anchor order') | ||
26 | m.anchors[:] = m.anchors.flip(0) | ||
27 | |||
28 | |||
29 | @TryExcept(f'{PREFIX}ERROR: ') | ||
30 | def check_anchors(dataset, model, thr=4.0, imgsz=640): | ||
31 | # Check anchor fit to data, recompute if necessary | ||
32 | m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() | ||
33 | shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) | ||
34 | scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale | ||
35 | wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh | ||
36 | |||
37 | def metric(k): # compute metric | ||
38 | r = wh[:, None] / k[None] | ||
39 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric | ||
40 | best = x.max(1)[0] # best_x | ||
41 | aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold | ||
42 | bpr = (best > 1 / thr).float().mean() # best possible recall | ||
43 | return bpr, aat | ||
44 | |||
45 | stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides | ||
46 | anchors = m.anchors.clone() * stride # current anchors | ||
47 | bpr, aat = metric(anchors.cpu().view(-1, 2)) | ||
48 | s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' | ||
49 | if bpr > 0.98: # threshold to recompute | ||
50 | LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') | ||
51 | else: | ||
52 | LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') | ||
53 | na = m.anchors.numel() // 2 # number of anchors | ||
54 | anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) | ||
55 | new_bpr = metric(anchors)[0] | ||
56 | if new_bpr > bpr: # replace anchors | ||
57 | anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) | ||
58 | m.anchors[:] = anchors.clone().view_as(m.anchors) | ||
59 | check_anchor_order(m) # must be in pixel-space (not grid-space) | ||
60 | m.anchors /= stride | ||
61 | s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' | ||
62 | else: | ||
63 | s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' | ||
64 | LOGGER.info(s) | ||
65 | |||
66 | |||
67 | def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): | ||
68 | """ Creates kmeans-evolved anchors from training dataset | ||
69 | |||
70 | Arguments: | ||
71 | dataset: path to data.yaml, or a loaded dataset | ||
72 | n: number of anchors | ||
73 | img_size: image size used for training | ||
74 | thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 | ||
75 | gen: generations to evolve anchors using genetic algorithm | ||
76 | verbose: print all results | ||
77 | |||
78 | Return: | ||
79 | k: kmeans evolved anchors | ||
80 | |||
81 | Usage: | ||
82 | from utils.autoanchor import *; _ = kmean_anchors() | ||
83 | """ | ||
84 | from scipy.cluster.vq import kmeans | ||
85 | |||
86 | npr = np.random | ||
87 | thr = 1 / thr | ||
88 | |||
89 | def metric(k, wh): # compute metrics | ||
90 | r = wh[:, None] / k[None] | ||
91 | x = torch.min(r, 1 / r).min(2)[0] # ratio metric | ||
92 | # x = wh_iou(wh, torch.tensor(k)) # iou metric | ||
93 | return x, x.max(1)[0] # x, best_x | ||
94 | |||
95 | def anchor_fitness(k): # mutation fitness | ||
96 | _, best = metric(torch.tensor(k, dtype=torch.float32), wh) | ||
97 | return (best * (best > thr).float()).mean() # fitness | ||
98 | |||
99 | def print_results(k, verbose=True): | ||
100 | k = k[np.argsort(k.prod(1))] # sort small to large | ||
101 | x, best = metric(k, wh0) | ||
102 | bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr | ||
103 | s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ | ||
104 | f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ | ||
105 | f'past_thr={x[x > thr].mean():.3f}-mean: ' | ||
106 | for x in k: | ||
107 | s += '%i,%i, ' % (round(x[0]), round(x[1])) | ||
108 | if verbose: | ||
109 | LOGGER.info(s[:-2]) | ||
110 | return k | ||
111 | |||
112 | if isinstance(dataset, str): # *.yaml file | ||
113 | with open(dataset, errors='ignore') as f: | ||
114 | data_dict = yaml.safe_load(f) # model dict | ||
115 | from utils.dataloaders import LoadImagesAndLabels | ||
116 | dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) | ||
117 | |||
118 | # Get label wh | ||
119 | shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) | ||
120 | wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh | ||
121 | |||
122 | # Filter | ||
123 | i = (wh0 < 3.0).any(1).sum() | ||
124 | if i: | ||
125 | LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') | ||
126 | wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels | ||
127 | # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 | ||
128 | |||
129 | # Kmeans init | ||
130 | try: | ||
131 | LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') | ||
132 | assert n <= len(wh) # apply overdetermined constraint | ||
133 | s = wh.std(0) # sigmas for whitening | ||
134 | k = kmeans(wh / s, n, iter=30)[0] * s # points | ||
135 | assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar | ||
136 | except Exception: | ||
137 | LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') | ||
138 | k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init | ||
139 | wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) | ||
140 | k = print_results(k, verbose=False) | ||
141 | |||
142 | # Plot | ||
143 | # k, d = [None] * 20, [None] * 20 | ||
144 | # for i in tqdm(range(1, 21)): | ||
145 | # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance | ||
146 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) | ||
147 | # ax = ax.ravel() | ||
148 | # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') | ||
149 | # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh | ||
150 | # ax[0].hist(wh[wh[:, 0]<100, 0],400) | ||
151 | # ax[1].hist(wh[wh[:, 1]<100, 1],400) | ||
152 | # fig.savefig('wh.png', dpi=200) | ||
153 | |||
154 | # Evolve | ||
155 | f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma | ||
156 | pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar | ||
157 | for _ in pbar: | ||
158 | v = np.ones(sh) | ||
159 | while (v == 1).all(): # mutate until a change occurs (prevent duplicates) | ||
160 | v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) | ||
161 | kg = (k.copy() * v).clip(min=2.0) | ||
162 | fg = anchor_fitness(kg) | ||
163 | if fg > f: | ||
164 | f, k = fg, kg.copy() | ||
165 | pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' | ||
166 | if verbose: | ||
167 | print_results(k, verbose) | ||
168 | |||
169 | return print_results(k).astype(np.float32) |
utils/autobatch.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | Auto-batch utils | ||
4 | """ | ||
5 | |||
6 | from copy import deepcopy | ||
7 | |||
8 | import numpy as np | ||
9 | import torch | ||
10 | |||
11 | from utils.general import LOGGER, colorstr | ||
12 | from utils.torch_utils import profile | ||
13 | |||
14 | |||
15 | def check_train_batch_size(model, imgsz=640, amp=True): | ||
16 | # Check YOLOv5 training batch size | ||
17 | with torch.cuda.amp.autocast(amp): | ||
18 | return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size | ||
19 | |||
20 | |||
21 | def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): | ||
22 | # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory | ||
23 | # Usage: | ||
24 | # import torch | ||
25 | # from utils.autobatch import autobatch | ||
26 | # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) | ||
27 | # print(autobatch(model)) | ||
28 | |||
29 | # Check device | ||
30 | prefix = colorstr('AutoBatch: ') | ||
31 | LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') | ||
32 | device = next(model.parameters()).device # get model device | ||
33 | if device.type == 'cpu': | ||
34 | LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') | ||
35 | return batch_size | ||
36 | if torch.backends.cudnn.benchmark: | ||
37 | LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') | ||
38 | return batch_size | ||
39 | |||
40 | # Inspect CUDA memory | ||
41 | gb = 1 << 30 # bytes to GiB (1024 ** 3) | ||
42 | d = str(device).upper() # 'CUDA:0' | ||
43 | properties = torch.cuda.get_device_properties(device) # device properties | ||
44 | t = properties.total_memory / gb # GiB total | ||
45 | r = torch.cuda.memory_reserved(device) / gb # GiB reserved | ||
46 | a = torch.cuda.memory_allocated(device) / gb # GiB allocated | ||
47 | f = t - (r + a) # GiB free | ||
48 | LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') | ||
49 | |||
50 | # Profile batch sizes | ||
51 | batch_sizes = [1, 2, 4, 8, 16] | ||
52 | try: | ||
53 | img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] | ||
54 | results = profile(img, model, n=3, device=device) | ||
55 | except Exception as e: | ||
56 | LOGGER.warning(f'{prefix}{e}') | ||
57 | return batch_size # profiling failed entirely, fall back to the default batch size | ||
58 | # Fit a solution | ||
59 | y = [x[2] for x in results if x] # memory [2] | ||
60 | p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit | ||
61 | b = int((f * fraction - p[1]) / p[0]) # solve the fit for the batch size at the target memory budget | ||
62 | if None in results: # some sizes failed | ||
63 | i = results.index(None) # first fail index | ||
64 | if b >= batch_sizes[i]: # predicted batch size at or above first failure point | ||
65 | b = batch_sizes[max(i - 1, 0)] # select prior safe point | ||
66 | if b < 1 or b > 1024: # b outside of safe range | ||
67 | b = batch_size | ||
68 | LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected; restart the environment and retry the command.') | ||
69 | |||
70 | fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted | ||
71 | LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') | ||
72 | return b |
utils/aws/__init__.py
0 → 100644
File mode changed
utils/aws/mime.sh
0 → 100644
1 | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ | ||
2 | # This script will run on every instance restart, not only on first start | ||
3 | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- | ||
4 | |||
5 | Content-Type: multipart/mixed; boundary="//" | ||
6 | MIME-Version: 1.0 | ||
7 | |||
8 | --// | ||
9 | Content-Type: text/cloud-config; charset="us-ascii" | ||
10 | MIME-Version: 1.0 | ||
11 | Content-Transfer-Encoding: 7bit | ||
12 | Content-Disposition: attachment; filename="cloud-config.txt" | ||
13 | |||
14 | #cloud-config | ||
15 | cloud_final_modules: | ||
16 | - [scripts-user, always] | ||
17 | |||
18 | --// | ||
19 | Content-Type: text/x-shellscript; charset="us-ascii" | ||
20 | MIME-Version: 1.0 | ||
21 | Content-Transfer-Encoding: 7bit | ||
22 | Content-Disposition: attachment; filename="userdata.txt" | ||
23 | |||
24 | #!/bin/bash | ||
25 | # --- paste contents of userdata.sh here --- | ||
26 | --// |
utils/aws/resume.py
0 → 100644
1 | # Resume all interrupted trainings in yolov5/ dir including DDP trainings | ||
2 | # Usage: $ python utils/aws/resume.py | ||
3 | |||
4 | import os | ||
5 | import sys | ||
6 | from pathlib import Path | ||
7 | |||
8 | import torch | ||
9 | import yaml | ||
10 | |||
11 | FILE = Path(__file__).resolve() | ||
12 | ROOT = FILE.parents[2] # YOLOv5 root directory | ||
13 | if str(ROOT) not in sys.path: | ||
14 | sys.path.append(str(ROOT)) # add ROOT to PATH | ||
15 | |||
16 | port = 0 # --master_port | ||
17 | path = Path('').resolve() | ||
18 | for last in path.rglob('*/**/last.pt'): | ||
19 | ckpt = torch.load(last) | ||
20 | if ckpt['optimizer'] is None: | ||
21 | continue | ||
22 | |||
23 | # Load opt.yaml | ||
24 | with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: | ||
25 | opt = yaml.safe_load(f) | ||
26 | |||
27 | # Get device count | ||
28 | d = opt['device'].split(',') # devices | ||
29 | nd = len(d) # number of devices | ||
30 | ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel | ||
31 | |||
32 | if ddp: # multi-GPU | ||
33 | port += 1 | ||
34 | cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' | ||
35 | else: # single-GPU | ||
36 | cmd = f'python train.py --resume {last}' | ||
37 | |||
38 | cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in background | ||
39 | print(cmd) | ||
40 | os.system(cmd) |
utils/aws/userdata.sh
0 → 100644
1 | #!/bin/bash | ||
2 | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html | ||
3 | # This script will run only once on first instance start (for a re-start script see mime.sh) | ||
4 | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir | ||
5 | # Use >300 GB SSD | ||
6 | |||
7 | cd /home/ubuntu | ||
8 | if [ ! -d yolov5 ]; then | ||
9 | echo "Running first-time script." # install dependencies, download COCO, pull Docker | ||
10 | git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 | ||
11 | cd yolov5 | ||
12 | bash data/scripts/get_coco.sh && echo "COCO done." & | ||
13 | sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & | ||
14 | python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & | ||
15 | wait && echo "All tasks done." # finish background tasks | ||
16 | else | ||
17 | echo "Running re-start script." # resume interrupted runs | ||
18 | i=0 | ||
19 | list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' | ||
20 | while IFS= read -r id; do | ||
21 | ((i++)) | ||
22 | echo "restarting container $i: $id" | ||
23 | sudo docker start $id | ||
24 | # sudo docker exec -it $id python train.py --resume # single-GPU | ||
25 | sudo docker exec -d $id python utils/aws/resume.py # multi-scenario | ||
26 | done <<<"$list" | ||
27 | fi |
utils/callbacks.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | Callback utils | ||
4 | """ | ||
5 | |||
6 | import threading | ||
7 | |||
8 | |||
9 | class Callbacks: | ||
10 | """" | ||
11 | Handles all registered callbacks for YOLOv5 Hooks | ||
12 | """ | ||
13 | |||
14 | def __init__(self): | ||
15 | # Define the available callbacks | ||
16 | self._callbacks = { | ||
17 | 'on_pretrain_routine_start': [], | ||
18 | 'on_pretrain_routine_end': [], | ||
19 | 'on_train_start': [], | ||
20 | 'on_train_epoch_start': [], | ||
21 | 'on_train_batch_start': [], | ||
22 | 'optimizer_step': [], | ||
23 | 'on_before_zero_grad': [], | ||
24 | 'on_train_batch_end': [], | ||
25 | 'on_train_epoch_end': [], | ||
26 | 'on_val_start': [], | ||
27 | 'on_val_batch_start': [], | ||
28 | 'on_val_image_end': [], | ||
29 | 'on_val_batch_end': [], | ||
30 | 'on_val_end': [], | ||
31 | 'on_fit_epoch_end': [], # fit = train + val | ||
32 | 'on_model_save': [], | ||
33 | 'on_train_end': [], | ||
34 | 'on_params_update': [], | ||
35 | 'teardown': [],} | ||
36 | self.stop_training = False # set True to interrupt training | ||
37 | |||
38 | def register_action(self, hook, name='', callback=None): | ||
39 | """ | ||
40 | Register a new action to a callback hook | ||
41 | |||
42 | Args: | ||
43 | hook: The callback hook name to register the action to | ||
44 | name: The name of the action for later reference | ||
45 | callback: The callback to fire | ||
46 | """ | ||
47 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" | ||
48 | assert callable(callback), f"callback '{callback}' is not callable" | ||
49 | self._callbacks[hook].append({'name': name, 'callback': callback}) | ||
50 | |||
51 | def get_registered_actions(self, hook=None): | ||
52 | """" | ||
53 | Returns all the registered actions by callback hook | ||
54 | |||
55 | Args: | ||
56 | hook: The name of the hook to check, defaults to all | ||
57 | """ | ||
58 | return self._callbacks[hook] if hook else self._callbacks | ||
59 | |||
60 | def run(self, hook, *args, thread=False, **kwargs): | ||
61 | """ | ||
62 | Loop through the registered actions for a hook and fire each callback, on the main thread or in daemon threads | ||
63 | |||
64 | Args: | ||
65 | hook: The name of the hook whose actions to fire | ||
66 | args: Arguments to receive from YOLOv5 | ||
67 | thread: (boolean) Run callbacks in daemon thread | ||
68 | kwargs: Keyword Arguments to receive from YOLOv5 | ||
69 | """ | ||
70 | |||
71 | assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" | ||
72 | for logger in self._callbacks[hook]: | ||
73 | if thread: | ||
74 | threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() | ||
75 | else: | ||
76 | logger['callback'](*args, **kwargs) |
utils/dataloaders.py
0 → 100644
This diff is collapsed.
utils/docker/Dockerfile
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 | ||
3 | # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference | ||
4 | |||
5 | # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch | ||
6 | FROM nvcr.io/nvidia/pytorch:22.09-py3 | ||
7 | RUN rm -rf /opt/pytorch # remove 1.2GB dir | ||
8 | |||
9 | # Downloads to user config dir | ||
10 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ | ||
11 | |||
12 | # Install linux packages | ||
13 | RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx | ||
14 | |||
15 | # Install pip packages | ||
16 | COPY requirements.txt . | ||
17 | RUN python -m pip install --upgrade pip wheel | ||
18 | RUN pip uninstall -y Pillow torchtext torch torchvision | ||
19 | RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook 'Pillow>=9.1.0' \ | ||
20 | 'opencv-python<4.6.0.66' \ | ||
21 | --extra-index-url https://download.pytorch.org/whl/cu113 | ||
22 | |||
23 | # Create working directory | ||
24 | RUN mkdir -p /usr/src/app | ||
25 | WORKDIR /usr/src/app | ||
26 | |||
27 | # Copy contents | ||
28 | # COPY . /usr/src/app (issues as not a .git directory) | ||
29 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app | ||
30 | |||
31 | # Set environment variables | ||
32 | ENV OMP_NUM_THREADS=8 | ||
33 | |||
34 | |||
35 | # Usage Examples ------------------------------------------------------------------------------------------------------- | ||
36 | |||
37 | # Build and Push | ||
38 | # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t | ||
39 | |||
40 | # Pull and Run | ||
41 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t | ||
42 | |||
43 | # Pull and Run with local directory access | ||
44 | # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t | ||
45 | |||
46 | # Kill all | ||
47 | # sudo docker kill $(sudo docker ps -q) | ||
48 | |||
49 | # Kill all image-based | ||
50 | # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) | ||
51 | |||
52 | # DockerHub tag update | ||
53 | # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew | ||
54 | |||
55 | # Clean up | ||
56 | # docker system prune -a --volumes | ||
57 | |||
58 | # Update Ubuntu drivers | ||
59 | # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ | ||
60 | |||
61 | # DDP test | ||
62 | # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 | ||
63 | |||
64 | # GCP VM from Image | ||
65 | # docker.io/ultralytics/yolov5:latest |
utils/docker/Dockerfile-arm64
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 | ||
3 | # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi | ||
4 | |||
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu | ||
6 | FROM arm64v8/ubuntu:20.04 | ||
7 | |||
8 | # Downloads to user config dir | ||
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ | ||
10 | |||
11 | # Install linux packages | ||
12 | RUN apt update | ||
13 | RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata | ||
14 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev | ||
15 | # RUN alias python=python3 | ||
16 | |||
17 | # Install pip packages | ||
18 | COPY requirements.txt . | ||
19 | RUN python3 -m pip install --upgrade pip wheel | ||
20 | RUN pip install --no-cache -r requirements.txt gsutil notebook \ | ||
21 | tensorflow-aarch64 | ||
22 | # tensorflowjs \ | ||
23 | # onnx onnx-simplifier onnxruntime \ | ||
24 | # coremltools openvino-dev \ | ||
25 | |||
26 | # Create working directory | ||
27 | RUN mkdir -p /usr/src/app | ||
28 | WORKDIR /usr/src/app | ||
29 | |||
30 | # Copy contents | ||
31 | # COPY . /usr/src/app (issues as not a .git directory) | ||
32 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app | ||
33 | |||
34 | |||
35 | # Usage Examples ------------------------------------------------------------------------------------------------------- | ||
36 | |||
37 | # Build and Push | ||
38 | # t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t | ||
39 | |||
40 | # Pull and Run | ||
41 | # t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t |
utils/docker/Dockerfile-cpu
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 | ||
3 | # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments | ||
4 | |||
5 | # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu | ||
6 | FROM ubuntu:20.04 | ||
7 | |||
8 | # Downloads to user config dir | ||
9 | ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ | ||
10 | |||
11 | # Install linux packages | ||
12 | RUN apt update | ||
13 | RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata | ||
14 | RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev | ||
15 | # RUN alias python=python3 | ||
16 | |||
17 | # Install pip packages | ||
18 | COPY requirements.txt . | ||
19 | RUN python3 -m pip install --upgrade pip wheel | ||
20 | RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ | ||
21 | coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ | ||
22 | # openvino-dev \ | ||
23 | --extra-index-url https://download.pytorch.org/whl/cpu | ||
24 | |||
25 | # Create working directory | ||
26 | RUN mkdir -p /usr/src/app | ||
27 | WORKDIR /usr/src/app | ||
28 | |||
29 | # Copy contents | ||
30 | # COPY . /usr/src/app (issues as not a .git directory) | ||
31 | RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app | ||
32 | |||
33 | |||
34 | # Usage Examples ------------------------------------------------------------------------------------------------------- | ||
35 | |||
36 | # Build and Push | ||
37 | # t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t | ||
38 | |||
39 | # Pull and Run | ||
40 | # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t |
utils/downloads.py
0 → 100644
1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license | ||
2 | """ | ||
3 | Download utils | ||
4 | """ | ||
5 | |||
6 | import logging | ||
7 | import os | ||
8 | import platform | ||
9 | import subprocess | ||
10 | import time | ||
11 | import urllib.request # also loads the urllib.parse and urllib.error submodules used below | ||
12 | from pathlib import Path | ||
13 | from zipfile import ZipFile | ||
14 | |||
15 | import requests | ||
16 | import torch | ||
17 | |||
18 | |||
19 | def is_url(url, check=True): | ||
20 | # Check if string is URL and check if URL exists | ||
21 | try: | ||
22 | url = str(url) | ||
23 | result = urllib.parse.urlparse(url) | ||
24 | assert all([result.scheme, result.netloc]) # check if is url | ||
25 | return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online | ||
26 | except (AssertionError, urllib.error.URLError): # URLError also covers HTTPError and unreachable hosts | ||
27 | return False | ||
28 | |||
29 | |||
30 | def gsutil_getsize(url=''): | ||
31 | # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du | ||
32 | s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') | ||
33 | return int(s.split(' ')[0]) if len(s) else 0 # bytes (avoid eval on shell output) | ||
34 | |||
35 | |||
36 | def url_getsize(url='https://ultralytics.com/images/bus.jpg'): | ||
37 | # Return downloadable file size in bytes | ||
38 | response = requests.head(url, allow_redirects=True) | ||
39 | return int(response.headers.get('content-length', -1)) | ||
40 | |||
41 | |||
42 | def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): | ||
43 | # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes | ||
44 | from utils.general import LOGGER | ||
45 | |||
46 | file = Path(file) | ||
47 | assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" | ||
48 | try: # url1 | ||
49 | LOGGER.info(f'Downloading {url} to {file}...') | ||
50 | torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) | ||
51 | assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check | ||
52 | except Exception as e: # url2 | ||
53 | if file.exists(): | ||
54 | file.unlink() # remove partial downloads | ||
55 | LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') | ||
56 | os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail | ||
57 | finally: | ||
58 | if not file.exists() or file.stat().st_size < min_bytes: # check | ||
59 | if file.exists(): | ||
60 | file.unlink() # remove partial downloads | ||
61 | LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") | ||
62 | LOGGER.info('') | ||
63 | |||
64 | |||
65 | def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): | ||
66 | # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. | ||
67 | from utils.general import LOGGER | ||
68 | |||
69 | def github_assets(repository, version='latest'): | ||
70 | # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) | ||
71 | if version != 'latest': | ||
72 | version = f'tags/{version}' # i.e. tags/v6.2 | ||
73 | response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api | ||
74 | return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets | ||
75 | |||
76 | file = Path(str(file).strip().replace("'", '')) | ||
77 | if not file.exists(): | ||
78 | # URL specified | ||
79 | name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. | ||
80 | if str(file).startswith(('http:/', 'https:/')): # download | ||
81 | url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ | ||
82 | file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... | ||
83 | if Path(file).is_file(): | ||
84 | LOGGER.info(f'Found {url} locally at {file}') # file already exists | ||
85 | else: | ||
86 | safe_download(file=file, url=url, min_bytes=1E5) | ||
87 | return file | ||
88 | |||
89 | # GitHub assets | ||
90 | assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default | ||
91 | try: | ||
92 | tag, assets = github_assets(repo, release) | ||
93 | except Exception: | ||
94 | try: | ||
95 | tag, assets = github_assets(repo) # latest release | ||
96 | except Exception: | ||
97 | try: | ||
98 | tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] | ||
99 | except Exception: | ||
100 | tag = release | ||
101 | |||
102 | file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) | ||
103 | if name in assets: | ||
104 | url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror | ||
105 | safe_download( | ||
106 | file, | ||
107 | url=f'https://github.com/{repo}/releases/download/{tag}/{name}', | ||
108 | min_bytes=1E5, | ||
109 | error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') | ||
110 | |||
111 | return str(file) | ||
112 | |||
113 | |||
114 | def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): | ||
115 | # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() | ||
116 | t = time.time() | ||
117 | file = Path(file) | ||
118 | cookie = Path('cookie') # gdrive cookie | ||
119 | print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') | ||
120 | if file.exists(): | ||
121 | file.unlink() # remove existing file | ||
122 | if cookie.exists(): | ||
123 | cookie.unlink() # remove existing cookie | ||
124 | |||
125 | # Attempt file download | ||
126 | out = "NUL" if platform.system() == "Windows" else "/dev/null" | ||
127 | os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') | ||
128 | if os.path.exists('cookie'): # large file | ||
129 | s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' | ||
130 | else: # small file | ||
131 | s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' | ||
132 | r = os.system(s) # execute, capture return | ||
133 | if cookie.exists(): | ||
134 | cookie.unlink() # remove existing cookie | ||
135 | |||
136 | # Error check | ||
137 | if r != 0: | ||
138 | if file.exists(): | ||
139 | file.unlink() # remove partial | ||
140 | print('Download error ') # raise Exception('Download error') | ||
141 | return r | ||
142 | |||
143 | # Unzip if archive | ||
144 | if file.suffix == '.zip': | ||
145 | print('unzipping... ', end='') | ||
146 | ZipFile(file).extractall(path=file.parent) # unzip | ||
147 | file.unlink() # remove zip | ||
148 | |||
149 | print(f'Done ({time.time() - t:.1f}s)') | ||
150 | return r | ||
151 | |||
152 | |||
153 | def get_token(cookie="./cookie"): | ||
154 | with open(cookie) as f: | ||
155 | for line in f: | ||
156 | if "download" in line: | ||
157 | return line.split()[-1] | ||
158 | return "" | ||
159 | |||
160 | |||
161 | # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- | ||
162 | # | ||
163 | # | ||
164 | # def upload_blob(bucket_name, source_file_name, destination_blob_name): | ||
165 | # # Uploads a file to a bucket | ||
166 | # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python | ||
167 | # | ||
168 | # storage_client = storage.Client() | ||
169 | # bucket = storage_client.get_bucket(bucket_name) | ||
170 | # blob = bucket.blob(destination_blob_name) | ||
171 | # | ||
172 | # blob.upload_from_filename(source_file_name) | ||
173 | # | ||
174 | # print('File {} uploaded to {}.'.format( | ||
175 | # source_file_name, | ||
176 | # destination_blob_name)) | ||
177 | # | ||
178 | # | ||
179 | # def download_blob(bucket_name, source_blob_name, destination_file_name): | ||
180 | # # Uploads a blob from a bucket | ||
181 | # storage_client = storage.Client() | ||
182 | # bucket = storage_client.get_bucket(bucket_name) | ||
183 | # blob = bucket.blob(source_blob_name) | ||
184 | # | ||
185 | # blob.download_to_filename(destination_file_name) | ||
186 | # | ||
187 | # print('Blob {} downloaded to {}.'.format( | ||
188 | # source_blob_name, | ||
189 | # destination_file_name)) |
utils/flask_rest_api/README.md
0 → 100644
This diff is collapsed.
utils/flask_rest_api/example_request.py
0 → 100644
This diff is collapsed.
utils/flask_rest_api/restapi.py
0 → 100644
This diff is collapsed.
utils/general.py
0 → 100644
This diff is collapsed.
utils/google_app_engine/Dockerfile
0 → 100644
This diff is collapsed.
utils/google_app_engine/additional_requirements.txt
0 → 100644
This diff is collapsed.
utils/google_app_engine/app.yaml
0 → 100644
This diff is collapsed.
utils/loggers/__init__.py
0 → 100644
This diff is collapsed.
utils/loggers/clearml/README.md
0 → 100644
This diff is collapsed.
utils/loggers/clearml/__init__.py
0 → 100644
This diff is collapsed.
utils/loggers/clearml/clearml_utils.py
0 → 100644
This diff is collapsed.
utils/loggers/clearml/hpo.py
0 → 100644
This diff is collapsed.
utils/loggers/comet/README.md
0 → 100644
This diff is collapsed.
utils/loggers/comet/__init__.py
0 → 100644
This diff is collapsed.
utils/loggers/comet/comet_utils.py
0 → 100644
This diff is collapsed.
utils/loggers/comet/hpo.py
0 → 100644
This diff is collapsed.
utils/loggers/comet/optimizer_config.json
0 → 100644
This diff is collapsed.
utils/loggers/wandb/README.md
0 → 100644
This diff is collapsed.
utils/loggers/wandb/__init__.py
0 → 100644
This diff is collapsed.
utils/loggers/wandb/log_dataset.py
0 → 100644
This diff is collapsed.
utils/loggers/wandb/sweep.py
0 → 100644
This diff is collapsed.
utils/loggers/wandb/sweep.yaml
0 → 100644
This diff is collapsed.
utils/loggers/wandb/wandb_utils.py
0 → 100644
This diff is collapsed.
utils/loss.py
0 → 100644
This diff is collapsed.
utils/metrics.py
0 → 100644
This diff is collapsed.
utils/plots.py
0 → 100644
This diff is collapsed.
utils/segment/__init__.py
0 → 100644
This diff is collapsed.
utils/segment/augmentations.py
0 → 100644
This diff is collapsed.
utils/segment/dataloaders.py
0 → 100644
This diff is collapsed.
utils/segment/general.py
0 → 100644
This diff is collapsed.
utils/segment/loss.py
0 → 100644
This diff is collapsed.
utils/segment/metrics.py
0 → 100644
This diff is collapsed.
utils/segment/plots.py
0 → 100644
This diff is collapsed.
utils/torch_utils.py
0 → 100644
This diff is collapsed.
utils/triton.py
0 → 100644
This diff is collapsed.
val.py
0 → 100644
This diff is collapsed.