ffaa3550 by 乔峰昇

update

1 parent 298fa6f9
Showing 59 changed files with 11705 additions and 1 deletion
1 # turnsole
2 A series of convenience functions to make your machine learning project easier
3
4 ## Installation
5
6 ### Latest release
7 `pip install turnsole`
8 > The project is not open source yet, so this installation method is not guaranteed to work for now
9
10 ### Developer mode
11
12 `pip install -e .`
13
14 ## Quick Start
15 ### PDF Operations
16 #### Smart PDF-to-image conversion
17 Intelligently extracts the figures embedded in a PDF file: a page with no figures is captured as a full-page screenshot, and fragmented images are stitched back together automatically
18
19 ##### Example:
20 <pre># pdf_path is the path to the PDF file; the output images are grouped by page number
21 images = turnsole.pdf_to_images(pdf_path)</pre>
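
To flatten the per-page grouping into a single list and save each image to disk, you can do what the demo script later in this changeset does (a minimal sketch; `pdf_path` is assumed to point at an existing PDF file):

```python
import cv2
from turnsole import pdf_to_images

images = pdf_to_images(pdf_path)  # one list of images per page
images = sum(images, [])          # flatten into a single list

for index, image in enumerate(images):
    cv2.imwrite(f'page-{index}.jpg', image)
```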
22
23 ### Image toolbox
24 #### base64_to_bgr / bgr_to_base64
25 Convert between images and base64
26
27 ##### Example:
28 <pre>image = turnsole.base64_to_bgr(img64)
29 img64 = turnsole.bgr_to_base64(image)</pre>
30
31 #### image_crop
32 Crops a slice of `image` according to `bbox`; if `perspective` is set to True, the slice is taken with a perspective transform (so rotated targets can be cropped)
33
34 ##### Example:
35 <pre>im_slice_no_perspective = turnsole.image_crop(image, bbox)
36 im_slice = turnsole.image_crop(image, bbox, perspective=True)</pre>
37
38 ##### Output:
39
40 <img src="docs/images/image_crop.png?raw=true" alt="image crop example" style="max-width: 200px;">
41
42 ### OCR engine module
43 The OCR engine is a collection of low-level OCR-related models; we provide both function-style interfaces and a standard API for these models
44
45 - [x] ADC :tada:
46 - [x] DBNet :tada:
47 - [x] CRNN :tada:
48 - [x] Object Detector :tada:
49 - [x] Signature Detector :tada:
50
51 #### Free trial
52 ```python
53 import requests
54
55 results = requests.post(url=r'http://139.196.149.46:9001/gen_ocr', files={'file': open(file_path, 'rb')}).json()
56 ocr_results = results['ocr_results']
57 ```
58
59 #### Prerequisites
60 The OCR engine module depends on the underlying neural network models, so those models must first be mounted with Docker
61 
62 First put the ./model_repository folder and the models inside it at the project root, then start the container; if you do not have the models, ask [lvkui](lvkui@situdata.com) for them
63 
64 Usage is very simple: just start the corresponding Docker container
65
66 ```bash
67 docker run --gpus="device=0" --rm -p 8000:8000 -p 8001:8001 -p 8002:8002 -v $PWD/model_repository:/models nvcr.io/nvidia/tritonserver:21.10-py3 tritonserver --model-repository=/models
68 ```
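
Once the container is up, you can check that Triton has loaded the models by polling its standard HTTP readiness endpoint (a minimal sketch, assuming the default HTTP port 8000 mapped above):

```python
import requests

# Returns HTTP 200 once the inference server and its models are ready
resp = requests.get('http://localhost:8000/v2/health/ready')
print('Triton ready:', resp.status_code == 200)
```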
69
70 #### ADC
71 General document orientation-correction algorithm
72 
73 ```python
74 from turnsole.ocr_engine import angle_detector
75
76 image_rotated, direction = angle_detector.ADC(image, fine_degree=False)
77 ```
78
79 #### DBNet
80 General text detection algorithm
81 
82 ```python
83 from turnsole.ocr_engine import text_detector
84
85 boxes = text_detector.predict(image)
86 ```
87
88 #### CRNN
89 General text recognition algorithm
90 
91 ```python
92 from turnsole.ocr_engine import text_recognizer
93
94 ocr_result, ocr_time = text_recognizer.predict_batch(image, boxes)
95 ```
96
97 #### Object Detector
98 General document detection algorithm
99 
100 ```python
101 from turnsole.ocr_engine import object_detector
102
103 object_list = object_detector.process(image)
104 ```
105
106 #### Signature Detector
107 Signature, stamp, and QR code detection algorithm
108 
109 ```python
110 from turnsole.ocr_engine import signature_detector
111
112 signature_list = signature_detector.process(image)
113 ```
114
115 #### Standard API
116 ```bash
117 python api/ocr_engine_server.py
118 ```
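
Once the server is running, clients can call the same endpoints defined in `api/ocr_engine_server.py`. A minimal client sketch (the host and port are assumptions; match them to the `app.run(...)` call in the server):

```python
import requests

# /gen_ocr_with_rotation also deskews the document before recognition
with open(file_path, 'rb') as f:
    results = requests.post(url='http://localhost:9001/gen_ocr_with_rotation',
                            files={'file': f}).json()

print(results['direction'])    # detected document orientation
print(results['ocr_results'])  # recognized text
```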
1 [2022-10-21 14:12:17 +0800] [8546] [INFO] Goin' Fast @ http://192.168.10.11:9001
2 [2022-10-21 14:12:17 +0800] [8567] [INFO] Starting worker [8567]
3 [2022-10-21 14:12:17 +0800] [8568] [INFO] Starting worker [8568]
4 [2022-10-21 14:12:17 +0800] [8569] [INFO] Starting worker [8569]
5 [2022-10-21 14:12:17 +0800] [8570] [INFO] Starting worker [8570]
6 [2022-10-21 14:12:17 +0800] [8571] [INFO] Starting worker [8571]
7 [2022-10-21 14:12:17 +0800] [8572] [INFO] Starting worker [8572]
8 [2022-10-21 14:12:17 +0800] [8573] [INFO] Starting worker [8573]
9 [2022-10-21 14:12:17 +0800] [8576] [INFO] Starting worker [8576]
10 [2022-10-21 14:12:17 +0800] [8574] [INFO] Starting worker [8574]
11 [2022-10-21 14:12:17 +0800] [8575] [INFO] Starting worker [8575]
12 [2022-10-21 14:13:51 +0800] [8575] [ERROR] Exception occurred while handling uri: 'http://192.168.10.11:9001/gen_ocr'
13 Traceback (most recent call last):
14 File "/home/situ/miniconda3/envs/workenv/lib/python3.6/site-packages/sanic/app.py", line 944, in handle_request
15 response = await response
16 File "ocr_engine_server.py", line 37, in ocr_engine
17 boxes = text_detector.predict(image)
18 File "/home/situ/qfs/invoice_tamper/09_project/project/bank_bill_ocr/OCR_Engine/turnsole/ocr_engine/DBNet/text_detector.py", line 113, in predict
19 outputs=outputs
20 File "/home/situ/miniconda3/envs/workenv/lib/python3.6/site-packages/tritonclient/grpc/__init__.py", line 1431, in infer
21 raise_error_grpc(rpc_error)
22 File "/home/situ/miniconda3/envs/workenv/lib/python3.6/site-packages/tritonclient/grpc/__init__.py", line 62, in raise_error_grpc
23 raise get_error_grpc(rpc_error) from None
24 tritonclient.utils.InferenceServerException: [StatusCode.UNAVAILABLE] Request for unknown model: 'dbnet_model' is not found
25 [2022-10-21 14:13:51 +0800] - (sanic.access)[INFO][192.168.10.11:57260]: POST http://192.168.10.11:9001/gen_ocr 500 735
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-06-05 20:49:51
5 # @Last Modified : 2022-08-19 17:24:55
6 # @Description :
7
8 import os
9
10 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
11
12 from sanic import Sanic
13 from sanic.response import json
14
15 from turnsole.ocr_engine import angle_detector
16 from turnsole.ocr_engine import text_detector
17 from turnsole.ocr_engine import text_recognizer
18 from turnsole.ocr_engine import object_detector
19 from turnsole.ocr_engine import signature_detector
20
21 from turnsole import bytes_to_bgr
22
23 app = Sanic("OCR_ENGINE")
24 app.config.REQUEST_MAX_SIZE = 1000000000 # Maximum request size in bytes (1 GB)
25 app.config.REQUEST_BUFFER_QUEUE_SIZE = 1000 # Request stream buffer queue size
26 app.config.REQUEST_TIMEOUT = 600 # How long a request may take to arrive (seconds)
27 app.config.RESPONSE_TIMEOUT = 600 # How long a response may take to process (seconds)
28
29
30 @app.post('/gen_ocr')
31 async def ocr_engine(request):
32 # request.files.get() returns an object with type/body/name attributes
33 file = request.files.get('file').body
34 # Convert the raw bytes into a BGR image
35 image = bytes_to_bgr(file)
36 # Text detection
37 boxes = text_detector.predict(image)
38 # Text recognition
39 res, _ = text_recognizer.predict_batch(image[..., ::-1], boxes)
40 resp = {}
41 resp["ocr_results"] = res
42 return json(resp)
43
44
45 @app.post('/gen_ocr_with_rotation')
46 async def ocr_engine_with_rotation(request):
47 # request.files.get() returns an object with type/body/name attributes
48 file = request.files.get('file').body
49 # Convert the raw bytes into a BGR image
50 image = bytes_to_bgr(file)
51 # Orientation detection
52 image, direction = angle_detector.ADC(image.copy(), fine_degree=False)
53 # Text detection
54 boxes = text_detector.predict(image)
55 # Text recognition
56 res, _ = text_recognizer.predict_batch(image[..., ::-1], boxes)
57
58 resp = {}
59 resp["ocr_results"] = res
60 resp["direction"] = direction
61 return json(resp)
62
63
64 @app.post("/object_detect")
65 async def object_detect(request):
66 # request.files.get() returns an object with type/body/name attributes
67 file = request.files.get('file').body
68 # Convert the raw bytes into a BGR image
69 image = bytes_to_bgr(file)
70 # General document detection
71 object_list = object_detector.process(image)
72 return json(object_list)
73
74
75 @app.post("/signature_detect")
76 async def signature_detect(request):
77 # request.files.get() returns an object with type/body/name attributes
78 file = request.files.get('file').body
79 # Convert the raw bytes into a BGR image
80 image = bytes_to_bgr(file)
81 # Signature / stamp / QR code / barcode detection
82 signature_list = signature_detector.process(image)
83 return json(signature_list)
84
85
86 if __name__ == "__main__":
87 # app.run(host="0.0.0.0", port=9001)
88 app.run(host="192.168.10.11", port=9002, workers=10)
89 # uvicorn server:app --port 9001 --workers 10
1 # Modified from:
2 # https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
3
4 # Performance:
5 # Python 2.7: 105.78 --> 131.75
6 # Python 3.7: 15.36 --> 50.13
7
8 # USAGE
9 # python read_frames_fast.py --video videos/jurassic_park_intro.mp4
10
11 # import the necessary packages
12 from turnsole.video import FileVideoStream
13 from turnsole.video import FPS
14 import numpy as np
15 import argparse
16 import imutils
17 import time
18 import cv2
19
20 def filterFrame(frame):
21 frame = imutils.resize(frame, width=450)
22 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
23 frame = np.dstack([frame, frame, frame])
24 return frame
25
26 # construct the argument parse and parse the arguments
27 ap = argparse.ArgumentParser()
28 ap.add_argument("-v", "--video", required=True,
29 help="path to input video file")
30 args = vars(ap.parse_args())
31
32 # start the file video stream thread and allow the buffer to
33 # start to fill
34 print("[INFO] starting video file thread...")
35 fvs = FileVideoStream(args["video"], transform=filterFrame).start()
36 time.sleep(1.0)
37
38 # start the FPS timer
39 fps = FPS().start()
40
41 # loop over frames from the video file stream
42 while fvs.running():
43 # grab the frame from the threaded video file stream, resize
44 # it, and convert it to grayscale (while still retaining 3
45 # channels)
46 frame = fvs.read()
47
48 # Relocated filtering into producer thread with transform=filterFrame
49 # Python 2.7: FPS 92.11 -> 131.36
50 # Python 3.7: FPS 41.44 -> 50.11
51 #frame = filterFrame(frame)
52
53 # display the size of the queue on the frame
54 cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
55 (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
56
57 # show the frame and update the FPS counter
58 cv2.imshow("Frame", frame)
59
60 cv2.waitKey(1)
61 if fvs.Q.qsize() < 2: # If we are low on frames, give time to producer
62 time.sleep(0.001) # Ensures producer runs now, so 2 is sufficient
63 fps.update()
64
65 # stop the timer and display FPS information
66 fps.stop()
67 print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
68 print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
69
70 # do a bit of cleanup
71 cv2.destroyAllWindows()
72 fvs.stop()
1 import cv2
2 import turnsole
3
4 if __name__ == '__main__':
5 img = cv2.imread('./images/sunflower.jpg')
6 img = turnsole.resize(img, width=512)
7 cv2.imshow('image', img)
8 cv2.waitKey()
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Created Date : 2021-03-05 16:51:22
5 # @Last Modified : 2021-03-05 18:15:53
6 # @Description :
7
8 from turnsole.model import EasyDet
9
10 if __name__ == '__main__':
11 model = EasyDet(phi=0)
12 model.summary()
13
14 import time
15 import numpy as np
16
17 x = np.random.random_sample((1, 640, 640, 3))
18 # warm up
19 output = model.predict(x)
20
21 print('\n[INFO] Test start')
22 time_start = time.time()
23 for i in range(1000):
24 output = model.predict(x)
25
26 time_end = time.time()
27 print('[INFO] Time used: {:.2f} ms'.format((time_end - time_start)*1000/(i+1)))
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-07-22 13:10:47
5 # @Last Modified : 2022-09-08 19:03:24
6 # @Description :
7
8 import os
9 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
10
11 import cv2
12 # from turnsole.ocr_engine import angle_detector
13 from turnsole.ocr_engine import object_detector
14 import matplotlib.pyplot as plt
15
16
17 if __name__ == "__main__":
18
19 base_dir = '/home/lk/MyProject/BMW/数据集/文件分类/身份证'
20
21 for (rootDir, dirNames, filenames) in os.walk(base_dir):
22
23 for filename in filenames:
24
25 if not filename.endswith('.jpg'):
26 continue
27
28 img_path = os.path.join(rootDir, filename)
29 print(img_path)
30
31 image = cv2.imread(img_path)
32
33 results = object_detector.process(image)
34
35 print(results)
36
37 for item in results:
38 xmin = item['location']['xmin']
39 ymin = item['location']['ymin']
40 xmax = item['location']['xmax']
41 ymax = item['location']['ymax']
42 cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
43
44 plt.imshow(image[...,::-1])
45 plt.show()
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-07-22 13:10:47
5 # @Last Modified : 2022-08-24 15:39:55
6 # @Description :
7
8
9 import os
10 import cv2
11 import fitz
12 from turnsole import pdf_to_images # pip install turnsole PyMuPDF opencv-python==4.4.0.44
13
14 if __name__ == "__main__":
15
16 base_dir = '/PATH/TO/YOUR/WORKDIR'
17
18 for (rootDir, dirNames, filenames) in os.walk(base_dir):
19
20 for filename in filenames:
21
22 if not filename.endswith('.pdf'):
23 continue
24
25 pdf_path = os.path.join(rootDir, filename)
26 print(pdf_path)
27
28 images = pdf_to_images(pdf_path)
29 images = sum(images, [])
30
31 image_dir = os.path.join(rootDir, filename.replace('.pdf', ''))
32 if not os.path.exists(image_dir):
33 os.makedirs(image_dir)
34
35 for index, image in enumerate(images):
36
37 save_path = os.path.join(image_dir, filename.replace('.pdf', '')+'-'+str(index)+'.jpg')
38 cv2.imwrite(save_path, image)
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-05-06 22:02:01
5 # @Last Modified : 2022-08-03 14:59:51
6 # @Description :
7
8
9 import os
10 import time
11 import random
12 import requests
13 import numpy as np
14 from threading import Thread
15
16
17 class API_test:
18 def __init__(self, file_dir, test_time, num_request):
19
20 self.file_paths = []
21 for fn in os.listdir(file_dir):
22 file_path = os.path.join(file_dir, fn)
23 self.file_paths.append(file_path)
24
25 self.time_start = time.time()
26 self.test_time = test_time * 60 # in seconds
27 self.results = list() # shared state, initialized before the threads start
28 self.index = 0
29 threads = []
30 for i in range(num_request):
31 t = Thread(target=self.update, args=())
32 threads.append(t)
33 for t in threads:
34 print(f'[INFO] {t} is running')
35 t.start()
36
37 def update(self):
38 while True:
39 file_path = random.choice(self.file_paths)
40
41 # Open the image file in binary mode
42 data = open(file_path, 'rb')
43
44 t0 = time.time()
45 response = requests.post(url=r'http://localhost:9001/gen_ocr_with_rotation', files={'file': data})
46
47 # Count failed requests
48 if response.status_code != 200:
49 print(response)
50
51 t1 = time.time()
52 self.results.append((t1-t0))
53
54 time_cost = (time.time() - self.time_start)
55 time_remaining = self.test_time - time_cost
56
57 self.index += 1
58
59 if time_remaining > 0:
60 print(f'\r[INFO] Time remaining {time_remaining} s, mean response time {np.mean(self.results)} s, TPS {len(self.results)/time_cost}, throughput {self.index}', end=' ', flush=True)
61 else:
62 break
63
64
65 if __name__ == '__main__':
66
67 imageDir = './demos/img_ocr' # test data directory
68 testTime = 10 # load-test duration, in minutes
69 numRequest = 10 # number of concurrent requests
70
71 API_test(imageDir, testTime, numRequest)
1 [metadata]
2 name = turnsole
3 version = 0.0.27
4 author = Kui Lyu
5 author_email = 9428.al@gmail.com
6 description = A series of convenience functions to make your machine learning project easier
7 long_description = file: README.md
8 long_description_content_type = text/markdown
9 url = https://github.com/Antonio-hi/turnsole
10 project_urls =
11 Bug Tracker = https://github.com/Antonio-hi/turnsole/issues
12 classifiers =
13 Programming Language :: Python :: 3
14 License :: OSI Approved :: MIT License
15 Operating System :: OS Independent
16
17 [options]
18 packages = find:
19 python_requires = >=3.6
1 # -*- coding: utf-8 -*-
2 # @Author : lk
3 # @Email : 9428.al@gmail.com
4 # @Created Date : 2021-03-04 16:56:27
5 # @Last Modified : 2021-03-04 17:16:57
6 # @Description :
7
8 import setuptools
9
10 setuptools.setup()
1 Metadata-Version: 2.1
2 Name: turnsole
3 Version: 0.0.27
4 Summary: A series of convenience functions to make your machine learning project easier
5 Home-page: https://github.com/Antonio-hi/turnsole
6 Author: Kui Lyu
7 Author-email: 9428.al@gmail.com
8 License: UNKNOWN
9 Project-URL: Bug Tracker, https://github.com/Antonio-hi/turnsole/issues
10 Platform: UNKNOWN
11 Classifier: Programming Language :: Python :: 3
12 Classifier: License :: OSI Approved :: MIT License
13 Classifier: Operating System :: OS Independent
14 Requires-Python: >=3.6
15 Description-Content-Type: text/markdown
16 License-File: LICENSE
17
1 LICENSE
2 README.md
3 setup.cfg
4 setup.py
5 turnsole/__init__.py
6 turnsole/convenience.py
7 turnsole/encodings.py
8 turnsole/model.py
9 turnsole/paths.py
10 turnsole/pdf_tools.py
11 turnsole.egg-info/PKG-INFO
12 turnsole.egg-info/SOURCES.txt
13 turnsole.egg-info/dependency_links.txt
14 turnsole.egg-info/top_level.txt
15 turnsole/face_utils/__init__.py
16 turnsole/face_utils/agedetector.py
17 turnsole/face_utils/facedetector.py
18 turnsole/nets/__init__.py
19 turnsole/nets/efficientnet.py
20 turnsole/ocr_engine/__init__.py
21 turnsole/ocr_engine/ADC/__init__.py
22 turnsole/ocr_engine/ADC/angle_detector.py
23 turnsole/ocr_engine/CRNN/__init__.py
24 turnsole/ocr_engine/CRNN/alphabets.py
25 turnsole/ocr_engine/CRNN/text_rec.py
26 turnsole/ocr_engine/DBNet/__init__.py
27 turnsole/ocr_engine/DBNet/text_detector.py
28 turnsole/ocr_engine/object_det/__init__.py
29 turnsole/ocr_engine/object_det/utils.py
30 turnsole/ocr_engine/signature_det/__init__.py
31 turnsole/ocr_engine/signature_det/utils.py
32 turnsole/ocr_engine/utils/__init__.py
33 turnsole/ocr_engine/utils/read_data.py
34 turnsole/video/__init__.py
35 turnsole/video/count_frames.py
36 turnsole/video/filevideostream.py
37 turnsole/video/fps.py
38 turnsole/video/pivideostream.py
39 turnsole/video/videostream.py
40 turnsole/video/webcamvideostream.py
1 try:
2 from . import ocr_engine
3 except Exception:
4 # The OCR engine is optional; importing it fails if its model dependencies are missing
5 pass
6 from .convenience import resize
7 from .convenience import resize_with_pad
8 from .convenience import image_crop
9 from .encodings import bytes_to_bgr
10 from .encodings import base64_to_image
11 from .encodings import base64_encode_file
12 from .encodings import base64_encode_image
13 from .encodings import base64_decode_image
14 from .encodings import base64_to_bgr
15 from .encodings import bgr_to_base64
16 from .pdf_tools import pdf_to_images
1 import cv2
2 import numpy as np
3
4 def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
5 # initialize the dimensions of the image to be resized and grab the image size
6 dim = None
7 (h, w) = image.shape[:2]
8
9 # if both the width and height are None, then return the original image
10 if width is None and height is None:
11 return image
12
13 # check to see if the width is None
14 if width is None:
15 # calculate the ratio of the height and construct the dimensions
16 r = height / float(h)
17 dim = (int(w * r), height)
18
19 # otherwise, the height is None
20 else:
21 # calculate the ratio of the width and construct the dimensions
22 r = width / float(w)
23 dim = (width, int(h * r))
24
25 # resize the image
26 resized = cv2.resize(image, dim, interpolation=inter)
27
28 # return the resized image
29 return resized
30
31 def resize_with_pad(image, target_width, target_height):
32 """Resuzes and pads an image to a target width and height.
33
34 Resizes an image to a target width and height by keeping the aspect ratio the same
35 without distortion.
36 ratio must be less than 1.0.
37 width and height will pad with zeroes.
38
39 Args:
40 image (Array): RGB/BGR
41 target_width (Int): Target width.
42 target_height (Int): Target height.
43
44 Returns:
45 Array: Resized and padded image. The image paded with zeroes.
46 Float: Image resized ratio. The ratio must be less than 1.0.
47 """
48 height, width, _ = image.shape
49
50 min_ratio = min(target_height/height, target_width/width)
51 ratio = min_ratio if min_ratio < 1.0 else 1.0
52
53 # To shrink an image, it will generally look best with INTER_AREA interpolation.
54 resized = cv2.resize(image, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
55 h, w, _ = resized.shape
56 canvas = np.zeros((target_height, target_width, 3), image.dtype)
57 canvas[:h, :w, :] = resized
58 return canvas, ratio
59
60 def image_crop(image, bbox, perspective=False):
61 """根据 Bbox 在 image 上进行切片,如果指定 perspective 为 True 则切片方式为透视变换(可以切旋转目标)
62
63 Args:
64 image (array): 三通道图片,切片结果保持原图颜色通道
65 bbox (array/list): 支持两点矩形框和四点旋转矩形框
66 支持以下两种格式:
67 1. bbox = [xmin, ymin, xmax, ymax]
68 2. bbox = [x0, y0, x1, y1, x2, y2, x3, y3]
69 perspective (bool, optional): 是否切出旋转目标. Defaults to False.
70
71 Returns:
72 array: 小切图,和原图颜色通道一致
73 """
74 # 按照 bbox 的正外接矩形切图
75 bbox = np.array(bbox, dtype=np.int32).reshape((-1, 2))
76 xmin, ymin, xmax, ymax = [min(bbox[:, 0]),
77 min(bbox[:, 1]),
78 max(bbox[:, 0]),
79 max(bbox[:, 1])]
80 xmin, ymin = max(0, xmin), max(0, ymin)
81 im_slice = image[ymin:ymax, xmin:xmax, :]
82
83 if perspective and bbox.shape[0] == 4:
84 # Get the width and height of the rotated rectangle
85 w, h = [int(np.linalg.norm(bbox[0] - bbox[1])),
86 int(np.linalg.norm(bbox[3] - bbox[0]))]
87 # Translate the bbox into the coordinate frame of the upright crop
88 bbox[:, 0] -= xmin
89 bbox[:, 1] -= ymin
90 # Apply the perspective crop
91 pts1 = np.float32(bbox)
92 pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
93 M = cv2.getPerspectiveTransform(pts1, pts2)
94 im_slice = cv2.warpPerspective(im_slice, M, (w, h))
95
96 return im_slice
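
if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the published API):
    # exercises resize_with_pad and image_crop on a synthetic image so it runs
    # without any external assets.
    image = np.zeros((300, 400, 3), dtype=np.uint8)
    cv2.rectangle(image, (100, 80), (300, 220), (0, 255, 0), -1)

    # Shrink-and-pad onto a fixed canvas; the returned ratio is at most 1.0
    canvas, ratio = resize_with_pad(image, target_width=256, target_height=256)
    print(canvas.shape, ratio)

    # Axis-aligned crop vs. perspective crop of a four-point rotated box
    print(image_crop(image, [100, 80, 300, 220]).shape)
    print(image_crop(image, [120, 80, 300, 100, 280, 220, 100, 200], perspective=True).shape)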
1 # -*- coding: utf-8 -*-
2 # @Author : Antonio-hi
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2021-08-09 19:08:49
5 # @Last Modified : 2021-08-10 10:11:06
6 # @Description :
7
8 # import the necessary packages
9 import numpy as np
10 import base64
11 import json
12 import sys
13 import cv2
14 import os
15
16 def base64_encode_image(a):
17 # return a JSON-encoded list of the base64 encoded image, image data type, and image shape
18 # return json.dumps([base64_encode_array(a), str(a.dtype), a.shape])
19 return json.dumps([base64_encode_array(a).decode("utf-8"), str(a.dtype),
20 a.shape])
21
22 def base64_decode_image(a):
23 # grab the array, data type, and shape from the JSON-decoded object
24 (a, dtype, shape) = json.loads(a)
25
26 # set the correct data type and reshape the matrix into an image
27 a = base64_decode_array(a, dtype).reshape(shape)
28
29 # return the loaded image
30 return a
31
32 def base64_encode_array(a):
33 # return the base64 encoded array
34 return base64.b64encode(a)
35
36 def base64_decode_array(a, dtype):
37 # decode and return the array
38 return np.frombuffer(base64.b64decode(a), dtype=dtype)
39
40 def base64_encode_file(image_path):
41 filename = os.path.basename(image_path)
42 # encode image file to base64 string
43 with open(image_path, 'rb') as f:
44 buffer = f.read()
45 # convert bytes buffer string then encode to base64 string
46 img64_bytes = base64.b64encode(buffer)
47 img64_str = img64_bytes.decode('utf-8') # bytes to str
48 return json.dumps({"filename" : filename, "img64": img64_str})
49
50 def base64_to_image(img64):
51 image_buffer = base64_decode_array(img64, dtype=np.uint8)
52 # In the case of color images, the decoded images will have the channels stored in B G R order.
53 image = cv2.imdecode(image_buffer, cv2.IMREAD_COLOR)
54 return image
55
56 def bytes_to_bgr(buffer: bytes):
57 """Read a byte stream as a OpenCV image
58
59 Args:
60 buffer (TYPE): bytes of a decoded image
61 """
62 img_array = np.frombuffer(buffer, np.uint8)
63 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
64 return image
65
66 def base64_to_bgr(img64):
67 """把 base64 转换成图片
68 单通道的灰度图或四通道的透明图都将自动转换成三通道的 BGR 图
69
70 Args:
71 img64 (TYPE): Description
72
73 Returns:
74 TYPE: image is a 3-D uint8 Tensor of shape [height, width, channels] where channels is BGR
75 """
76 encoded_image = base64.b64decode(img64)
77 img_array = np.frombuffer(encoded_image, np.uint8)
78 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
79 return image
80
81 def bgr_to_base64(image):
82 """ 把图片转换成 base64 格式,过程中把图片以 JPEG 格式进行了压缩,通常这会导致图像质量变差
83
84 Args:
85 image (TYPE): image is a 3-D uint8 or uint16 Tensor of shape [height, width, channels] where channels is BGR
86
87 Returns:
88 TYPE: the image in base64 format
89 """
90 retval, encoded_image = cv2.imencode('.jpg', image) # Encodes an image(BGR) into a memory buffer.
91 img64 = base64.b64encode(encoded_image)
92 return img64.decode('utf-8')
93
94
95 if __name__ == '__main__':
96
97 image_path = '/home/lk/Repository/Project/turnsole/demos/images/sunflower.jpg'
98
99 # 1) Convert an image file into a base64-encoded string (in theory any file type is supported)
100 json_str = base64_encode_file(image_path)
101
102 img64_dict = json.loads(json_str)
103
104 suffix = os.path.splitext(img64_dict['filename'])[-1].lower()
105 if suffix not in ['.jpg', '.jpeg', '.png', '.bmp']:
106 print(f'[INFO] Files of type {suffix} are not supported yet!')
107
108 # 2) Convert the base64-encoded string back into an image
109 image = base64_to_image(img64_dict['img64'])
110
111 inputs = image/255.
112
113 # 3) Home-grown: encode an array to base64 and decode it back without any image codec, preserving the array's dtype
114 base64_encode_json_string = base64_encode_image(inputs)
115
116 inputs = base64_decode_image(base64_encode_json_string)
117
118 print(inputs)
119
120 # 3. Prefixing a string literal with b
121 # e.g. response = b'<h1>Hello World!</h1>' # b'' marks this as a bytes object
122 
123 # Meaning:
124 
125 # The b"" prefix indicates that the string that follows is of type bytes.
126 
127 # Why it matters:
128 
129 # In network programming, servers and browsers only understand bytes data.
130 
131 # e.g. the argument of send() and the return value of recv() are both bytes
132 
133 # Note:
134 
135 # In Python 3, bytes and str are converted to each other via
136 # str.encode('utf-8')
137 # bytes.decode('utf-8')
1 # -*- coding: utf-8 -*-
2 # @Author : lk
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2021-08-11 17:10:16
5 # @Last Modified : 2021-08-12 16:14:53
6 # @Description :
7
8 import os
9 import tensorflow as tf
10
11 class AgeDetector:
12 def __init__(self, model_path):
13 self.age_map = {
14 0: '0-2',
15 1: '4-6',
16 2: '8-13',
17 3: '15-20',
18 4: '25-32',
19 5: '38-43',
20 6: '48-53',
21 7: '60+'
22 }
23
24 self.model = tf.keras.models.load_model(filepath=model_path,
25 compile=False)
26 self.inference_model = self.build_inference_model()
27
28 def build_inference_model(self):
29 image = self.model.input
30 x = tf.keras.applications.mobilenet_v2.preprocess_input(image)
31 predictions = self.model(x, training=False)
32 inference_model = tf.keras.Model(inputs=image, outputs=predictions)
33 return inference_model
34
35 def predict_batch(self, images):
36 # Takes a list of face images; the list must not be empty
37 images = tf.stack([tf.image.resize(image, [96, 96]) for image in images], axis=0)
38 preds = self.inference_model.predict(images)
39 indexes = tf.argmax(preds, axis=-1)
40 classes = [self.age_map[index.numpy()] for index in indexes]
41 return classes
42
43 if __name__ == '__main__':
44
45 import cv2
46 from turnsole import paths
47
48 age_det = AgeDetector(model_path='./ckpt/age_detector.h5')
49
50 data_dir = '/home/lk/Project/Face_Age_Gender/data/Emotion/emotion/010003_female_yellow_22'
51
52 for image_path in paths.list_images(data_dir):
53 image = cv2.imread(image_path)
54 classes = age_det.predict_batch([image])
55
56 print(classes)
57
1 # -*- coding: utf-8 -*-
2 # @Author : Antonio-hi
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2021-08-11 18:28:36
5 # @Last Modified : 2021-08-12 19:27:59
6 # @Description :
7
8 import os
9 import time
10 import numpy as np
11 import tensorflow as tf
12
13 def convert_to_corners(boxes):
14 """Changes the box format to corner coordinates
15
16 Arguments:
17 boxes: A tensor of rank 2 or higher with a shape of `(..., num_boxes, 4)`
18 representing bounding boxes where each box is of the format
19 `[x, y, width, height]`.
20
21 Returns:
22 converted boxes with shape same as that of boxes.
23 """
24 return tf.concat(
25 [boxes[..., :2] - boxes[..., 2:] / 2.0, boxes[..., :2] + boxes[..., 2:] / 2.0],
26 axis=-1,
27 )
28
29 class AnchorBox:
30 """Generates anchor boxes.
31
32 This class has operations to generate anchor boxes for feature maps at
33 strides `[8, 16, 32, 64, 128]`. Where each anchor each box is of the
34 format `[x, y, width, height]`.
35
36 Attributes:
37 aspect_ratios: A list of float values representing the aspect ratios of
38 the anchor boxes at each location on the feature map
39 scales: A list of float values representing the scale of the anchor boxes
40 at each location on the feature map.
41 num_anchors: The number of anchor boxes at each location on feature map
42 areas: A list of float values representing the areas of the anchor
43 boxes for each feature map in the feature pyramid.
44 strides: A list of float value representing the strides for each feature
45 map in the feature pyramid.
46 """
47
48 def __init__(self):
49 self.aspect_ratios = [0.5, 1.0, 2.0]
50 self.scales = [2 ** x for x in [0, 1 / 3, 2 / 3]]
51
52 self._num_anchors = len(self.aspect_ratios) * len(self.scales)
53 self._strides = [2 ** i for i in range(3, 8)]
54 self._areas = [x ** 2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
55 self._anchor_dims = self._compute_dims()
56
57 def _compute_dims(self):
58 """Computes anchor box dimensions for all ratios and scales at all levels
59 of the feature pyramid.
60 """
61 anchor_dims_all = []
62 for area in self._areas:
63 anchor_dims = []
64 for ratio in self.aspect_ratios:
65 anchor_height = tf.math.sqrt(area / ratio)
66 anchor_width = area / anchor_height
67 dims = tf.reshape(
68 tf.stack([anchor_width, anchor_height], axis=-1), [1, 1, 2]
69 )
70 for scale in self.scales:
71 anchor_dims.append(scale * dims)
72 anchor_dims_all.append(tf.stack(anchor_dims, axis=-2))
73 return anchor_dims_all
74
75 def _get_anchors(self, feature_height, feature_width, level):
76 """Generates anchor boxes for a given feature map size and level
77
78 Arguments:
79 feature_height: An integer representing the height of the feature map.
80 feature_width: An integer representing the width of the feature map.
81 level: An integer representing the level of the feature map in the
82 feature pyramid.
83
84 Returns:
85 anchor boxes with the shape
86 `(feature_height * feature_width * num_anchors, 4)`
87 """
88 rx = tf.range(feature_width, dtype=tf.float32) + 0.5
89 ry = tf.range(feature_height, dtype=tf.float32) + 0.5
90 centers = tf.stack(tf.meshgrid(rx, ry), axis=-1) * self._strides[level - 3]
91 centers = tf.expand_dims(centers, axis=-2)
92 centers = tf.tile(centers, [1, 1, self._num_anchors, 1])
93 dims = tf.tile(
94 self._anchor_dims[level - 3], [feature_height, feature_width, 1, 1]
95 )
96 anchors = tf.concat([centers, dims], axis=-1)
97 return tf.reshape(
98 anchors, [feature_height * feature_width * self._num_anchors, 4]
99 )
100
101 def get_anchors(self, image_height, image_width):
102 """Generates anchor boxes for all the feature maps of the feature pyramid.
103
104 Arguments:
105 image_height: Height of the input image.
106 image_width: Width of the input image.
107
108 Returns:
109 anchor boxes for all the feature maps, stacked as a single tensor
110 with shape `(total_anchors, 4)`
111 """
112 anchors = [
113 self._get_anchors(
114 tf.math.ceil(image_height / 2 ** i),
115 tf.math.ceil(image_width / 2 ** i),
116 i,
117 )
118 for i in range(3, 8)
119 ]
120 return tf.concat(anchors, axis=0)
121
122 class DecodePredictions(tf.keras.layers.Layer):
123 """A Keras layer that decodes predictions of the RetinaNet model.
124
125 Attributes:
126 num_classes: Number of classes in the dataset
127 confidence_threshold: Minimum class probability, below which detections
128 are pruned.
129 nms_iou_threshold: IOU threshold for the NMS operation
130 max_detections_per_class: Maximum number of detections to retain per
131 class.
132 max_detections: Maximum number of detections to retain across all
133 classes.
134 box_variance: The scaling factors used to scale the bounding box
135 predictions.
136 """
137
138 def __init__(
139 self,
140 num_classes=80,
141 confidence_threshold=0.05,
142 nms_iou_threshold=0.5,
143 max_detections_per_class=100,
144 max_detections=100,
145 box_variance=[0.1, 0.1, 0.2, 0.2],
146 **kwargs
147 ):
148 super(DecodePredictions, self).__init__(**kwargs)
149 self.num_classes = num_classes
150 self.confidence_threshold = confidence_threshold
151 self.nms_iou_threshold = nms_iou_threshold
152 self.max_detections_per_class = max_detections_per_class
153 self.max_detections = max_detections
154
155 self._anchor_box = AnchorBox()
156 self._box_variance = tf.convert_to_tensor(
157 [0.1, 0.1, 0.2, 0.2], dtype=tf.float32
158 )
159
160 def _decode_box_predictions(self, anchor_boxes, box_predictions):
161 boxes = box_predictions * self._box_variance
162 boxes = tf.concat(
163 [
164 boxes[:, :, :2] * anchor_boxes[:, :, 2:] + anchor_boxes[:, :, :2],
165 tf.math.exp(boxes[:, :, 2:]) * anchor_boxes[:, :, 2:],
166 ],
167 axis=-1,
168 )
169 boxes_transformed = convert_to_corners(boxes)
170 return boxes_transformed
171
172 def _decode_landm_predictions(self, anchor_boxes, landm_predictions): # anchor_boxes shape=(1, 138105, 4)
173 landmarks = tf.reshape(landm_predictions,
174 [tf.shape(landm_predictions)[0], tf.shape(anchor_boxes)[1], 5, 2])
175 anchor_boxes = tf.broadcast_to(
176 input=tf.expand_dims(anchor_boxes, 2),
177 shape=[tf.shape(landm_predictions)[0], tf.shape(anchor_boxes)[1], 5, 4])
178 landmarks *= (self._box_variance[:2] * anchor_boxes[:, :, :, 2:])
179 landmarks += anchor_boxes[:, :, :, :2]
180 return landmarks
181
182 def call(self, images, predictions):
183 image_shape = tf.cast(tf.shape(images), dtype=tf.float32)
184 anchor_boxes = self._anchor_box.get_anchors(image_shape[1], image_shape[2])
185
186 box_predictions = predictions[:, :, :4]
187 cls_predictions = tf.nn.sigmoid(predictions[:, :, 4])
188 landm_predictions = predictions[:, :, 5:15]
189
190 boxes = self._decode_box_predictions(anchor_boxes[None, ...], box_predictions)
191 landmarks = self._decode_landm_predictions(anchor_boxes[None, ...], landm_predictions)
192
193 selected_indices = tf.image.non_max_suppression(
194 boxes=boxes[0],
195 scores=cls_predictions[0],
196 max_output_size=self.max_detections,
197 iou_threshold=self.nms_iou_threshold,
198 score_threshold=self.confidence_threshold
199 )
200 selected_boxes = tf.gather(boxes[0], selected_indices)
201 selected_landmarks = tf.gather(landmarks[0], selected_indices)
202
203 return selected_boxes, selected_landmarks
204
205 class FaceDetector:
206 def __init__(self, model_path, confidence_threshold=0.5):
207 self.confidence_threshold = confidence_threshold
208 self.model = tf.keras.models.load_model(filepath=model_path,
209 compile=False)
210 self.inference_model = self.build_inference_model()
211
212 def build_inference_model(self):
213 image = self.model.input
214 x = tf.keras.applications.mobilenet_v2.preprocess_input(image)
215 predictions = self.model(x, training=False)
216 detections = DecodePredictions(confidence_threshold=self.confidence_threshold)(image, predictions)
217 inference_model = tf.keras.Model(inputs=image, outputs=detections)
218 return inference_model
219
220 def resize_and_pad_image(
221 self, image, min_side=128.0, max_side=1333.0, jitter=[256, 960], stride=128.0
222 ):
223 """Resizes and pads image while preserving aspect ratio.
224
225 Returns:
226 image: Resized and padded image.
227 image_shape: Shape of the image before padding.
228 ratio: The scaling factor used to resize the image
229 """
230 image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
231 if jitter is not None:
232 min_side = tf.random.uniform((), jitter[0], jitter[1], dtype=tf.float32)
233 ratio = min_side / tf.reduce_min(image_shape)
234 if ratio * tf.reduce_max(image_shape) > max_side:
235 ratio = max_side / tf.reduce_max(image_shape)
236 image_shape = ratio * image_shape # tf.float32
237 image = tf.image.resize(image, tf.cast(image_shape, dtype=tf.int32))
238 padded_image_shape = tf.cast(
239 tf.math.ceil(image_shape / stride) * stride, dtype=tf.int32
240 )
241 image = tf.image.pad_to_bounding_box(
242 image, 0, 0, padded_image_shape[0], padded_image_shape[1]
243 )
244 return image, image_shape, ratio
245
246 def predict(self, image, min_side=128):
247
248 # input a image return boxes and landmarks
249 image, _, ratio = self.resize_and_pad_image(image, min_side=min_side, jitter=None)
250
251 detections = self.inference_model.predict(tf.expand_dims(image, axis=0))
252 boxes, landmarks = detections
253
254 boxes = np.array(boxes/ratio, dtype=np.int32)
255 landmarks = np.array(landmarks/ratio, dtype=np.int32)
256 return boxes, landmarks
264
265 if __name__ == '__main__':
266 import cv2
267
268 facedetector = FaceDetector(model_path='./model/facedetector.h5')
269
270 image_path = '/home/lk/Project/Face_Age_Gender/data/WIDER/WIDER_train/images/28--Sports_Fan/28_Sports_Fan_Sports_Fan_28_615.jpg'
271 # image_path = '/home/lk/Project/Face_Age_Gender/data/Emotion/emotion/010021_female_yellow_22/angry.jpg'
272
273 image = cv2.imread(image_path)
274
275 x = facedetector.predict(image, min_side=256)
276
277 print(x)
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Created Date : 2021-02-24 13:58:46
5 # @Last Modified : 2021-03-05 18:14:17
6 # @Description :
7
8 import tensorflow as tf
9
10 from .nets.efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3
11 from .nets.efficientnet import EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7
12
13 def load_backbone(phi, input_tensor, weights='imagenet'):
14 if phi == 0:
15 model = EfficientNetB0(include_top=False,
16 weights=weights,
17 input_tensor=input_tensor)
18 # Extract features from these layers
19 layer_names = [
20 'block2b_add', # 1/4
21 'block3b_add', # 1/8
22 'block5c_add', # 1/16
23 'block7a_project_bn', # 1/32
24 ]
25 elif phi == 1:
26 model = EfficientNetB1(include_top=False,
27 weights=weights,
28 input_tensor=input_tensor)
29 layer_names = [
30 'block2c_add', # 1/4
31 'block3c_add', # 1/8
32 'block5d_add', # 1/16
33 'block7b_add', # 1/32
34 ]
35 elif phi == 2:
36 model = EfficientNetB2(include_top=False,
37 weights=weights,
38 input_tensor=input_tensor)
39 layer_names = [
40 'block2c_add', # 1/4
41 'block3c_add', # 1/8
42 'block5d_add', # 1/16
43 'block7b_add', # 1/32
44 ]
45 elif phi == 3:
46 model = EfficientNetB3(include_top=False,
47 weights=weights,
48 input_tensor=input_tensor)
49 layer_names = [
50 'block2c_add', # 1/4
51 'block3c_add', # 1/8
52 'block5e_add', # 1/16
53 'block7b_add', # 1/32
54 ]
55 elif phi == 4:
56 model = EfficientNetB4(include_top=False,
57 weights=weights,
58 input_tensor=input_tensor)
59 layer_names = [
60 'block2c_add', # 1/4
61 'block3d_add', # 1/8
62 'block5f_add', # 1/16
63 'block7b_add', # 1/32
64 ]
65 elif phi == 5:
66 model = EfficientNetB5(include_top=False,
67 weights=weights,
68 input_tensor=input_tensor)
69 layer_names = [
70 'block2e_add', # 1/4
71 'block3e_add', # 1/8
72 'block5g_add', # 1/16
73 'block7c_add', # 1/32
74 ]
75 elif phi == 6:
76 model = EfficientNetB6(include_top=False,
77 weights=weights,
78 input_tensor=input_tensor)
79 layer_names = [
80 'block2f_add', # 1/4
81 'block3f_add', # 1/8
82 'block5h_add', # 1/16
83 'block7c_add', # 1/32
84 ]
85 elif phi == 7:
86 model = EfficientNetB7(include_top=False,
87 weights=weights,
88 input_tensor=input_tensor)
89 layer_names = [
90 'block2g_add', # 1/4
91 'block3g_add', # 1/8
92 'block5j_add', # 1/16
93 'block7d_add', # 1/32
94 ]
95
96 skips = [model.get_layer(name).output for name in layer_names]
97 return model, skips
98
99 def EasyDet(phi=0, input_size=(None, None, 3), weights='imagenet'):
100 image_input = tf.keras.layers.Input(shape=input_size)
101
102 backbone, skips = load_backbone(phi=phi, input_tensor=image_input, weights=weights)
103 C2, C3, C4, C5 = skips
104
105 in2 = tf.keras.layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in2')(C2)
106 in3 = tf.keras.layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in3')(C3)
107 in4 = tf.keras.layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in4')(C4)
108 in5 = tf.keras.layers.Conv2D(256, (1, 1), padding='same', kernel_initializer='he_normal', name='in5')(C5)
109
110 # 1 / 32 * 8 = 1 / 4
111 P5 = tf.keras.layers.UpSampling2D(size=(8, 8))(
112 tf.keras.layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(in5))
113 # 1 / 16 * 4 = 1 / 4
114 out4 = tf.keras.layers.Add()([in4, tf.keras.layers.UpSampling2D(size=(2, 2))(in5)])
115 P4 = tf.keras.layers.UpSampling2D(size=(4, 4))(
116 tf.keras.layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(out4))
117 # 1 / 8 * 2 = 1 / 4
118 out3 = tf.keras.layers.Add()([in3, tf.keras.layers.UpSampling2D(size=(2, 2))(out4)])
119 P3 = tf.keras.layers.UpSampling2D(size=(2, 2))(
120 tf.keras.layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(out3))
121 # 1 / 4
122 P2 = tf.keras.layers.Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal')(
123 tf.keras.layers.Add()([in2, tf.keras.layers.UpSampling2D(size=(2, 2))(out3)]))
124 # (b, 1/4, 1/4, 256)
125 fuse = tf.keras.layers.Concatenate()([P2, P3, P4, P5])
126
127 model = tf.keras.models.Model(inputs=image_input, outputs=fuse)
128 return model
129
130
131 if __name__ == '__main__':
132 model = EasyDet(phi=0)
133 model.summary()
134
135 import time
136 import numpy as np
137
138 x = np.random.random_sample((1, 640, 640, 3))
139 # warm up
140 output = model.predict(x)
141
142 print('\n[INFO] Test start')
143 time_start = time.time()
144 for i in range(1000):
145 output = model.predict(x)
146
147 time_end = time.time()
148 print('[INFO] Time used: {:.2f} ms'.format((time_end - time_start)*1000/(i+1)))
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 # pylint: disable=invalid-name
16 # pylint: disable=missing-docstring
17 """EfficientNet models for Keras.
18 Reference:
19 - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
20 https://arxiv.org/abs/1905.11946) (ICML 2019)
21 """
22 from __future__ import absolute_import
23 from __future__ import division
24 from __future__ import print_function
25
26 import copy
27 import math
28
29 from tensorflow.keras import layers
30
31 from tensorflow.python.keras import backend
32 from tensorflow.python.keras.applications import imagenet_utils
33 from tensorflow.python.keras.engine import training
34 # from tensorflow.python.keras.layers import VersionAwareLayers
35 from tensorflow.python.keras.utils import data_utils
36 from tensorflow.python.keras.utils import layer_utils
37 from tensorflow.python.lib.io import file_io
38 from tensorflow.python.util.tf_export import keras_export
39
40
41 BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/keras-applications/'
42
43 WEIGHTS_HASHES = {
44 'b0': ('902e53a9f72be733fc0bcb005b3ebbac',
45 '50bc09e76180e00e4465e1a485ddc09d'),
46 'b1': ('1d254153d4ab51201f1646940f018540',
47 '74c4e6b3e1f6a1eea24c589628592432'),
48 'b2': ('b15cce36ff4dcbd00b6dd88e7857a6ad',
49 '111f8e2ac8aa800a7a99e3239f7bfb39'),
50 'b3': ('ffd1fdc53d0ce67064dc6a9c7960ede0',
51 'af6d107764bb5b1abb91932881670226'),
52 'b4': ('18c95ad55216b8f92d7e70b3a046e2fc',
53 'ebc24e6d6c33eaebbd558eafbeedf1ba'),
54 'b5': ('ace28f2a6363774853a83a0b21b9421a',
55 '38879255a25d3c92d5e44e04ae6cec6f'),
56 'b6': ('165f6e37dce68623721b423839de8be5',
57 '9ecce42647a20130c1f39a5d4cb75743'),
58 'b7': ('8c03f828fec3ef71311cd463b6759d99',
59 'cbcfe4450ddf6f3ad90b1b398090fe4a'),
60 }
61
62 DEFAULT_BLOCKS_ARGS = [{
63 'kernel_size': 3,
64 'repeats': 1,
65 'filters_in': 32,
66 'filters_out': 16,
67 'expand_ratio': 1,
68 'id_skip': True,
69 'strides': 1,
70 'se_ratio': 0.25
71 }, {
72 'kernel_size': 3,
73 'repeats': 2,
74 'filters_in': 16,
75 'filters_out': 24,
76 'expand_ratio': 6,
77 'id_skip': True,
78 'strides': 2,
79 'se_ratio': 0.25
80 }, {
81 'kernel_size': 5,
82 'repeats': 2,
83 'filters_in': 24,
84 'filters_out': 40,
85 'expand_ratio': 6,
86 'id_skip': True,
87 'strides': 2,
88 'se_ratio': 0.25
89 }, {
90 'kernel_size': 3,
91 'repeats': 3,
92 'filters_in': 40,
93 'filters_out': 80,
94 'expand_ratio': 6,
95 'id_skip': True,
96 'strides': 2,
97 'se_ratio': 0.25
98 }, {
99 'kernel_size': 5,
100 'repeats': 3,
101 'filters_in': 80,
102 'filters_out': 112,
103 'expand_ratio': 6,
104 'id_skip': True,
105 'strides': 1,
106 'se_ratio': 0.25
107 }, {
108 'kernel_size': 5,
109 'repeats': 4,
110 'filters_in': 112,
111 'filters_out': 192,
112 'expand_ratio': 6,
113 'id_skip': True,
114 'strides': 2,
115 'se_ratio': 0.25
116 }, {
117 'kernel_size': 3,
118 'repeats': 1,
119 'filters_in': 192,
120 'filters_out': 320,
121 'expand_ratio': 6,
122 'id_skip': True,
123 'strides': 1,
124 'se_ratio': 0.25
125 }]
126
127 CONV_KERNEL_INITIALIZER = {
128 'class_name': 'VarianceScaling',
129 'config': {
130 'scale': 2.0,
131 'mode': 'fan_out',
132 'distribution': 'truncated_normal'
133 }
134 }
135
136 DENSE_KERNEL_INITIALIZER = {
137 'class_name': 'VarianceScaling',
138 'config': {
139 'scale': 1. / 3.,
140 'mode': 'fan_out',
141 'distribution': 'uniform'
142 }
143 }
144
145 # layers = VersionAwareLayers()
146
147 BASE_DOCSTRING = """Instantiates the {name} architecture.
148 Reference:
149 - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
150 https://arxiv.org/abs/1905.11946) (ICML 2019)
151 Optionally loads weights pre-trained on ImageNet.
152 Note that the data format convention used by the model is
153 the one specified in your Keras config at `~/.keras/keras.json`.
154 If you have never configured it, it defaults to `"channels_last"`.
155 Arguments:
156 include_top: Whether to include the fully-connected
157 layer at the top of the network. Defaults to True.
158 weights: One of `None` (random initialization),
159 'imagenet' (pre-training on ImageNet),
160 or the path to the weights file to be loaded. Defaults to 'imagenet'.
161 input_tensor: Optional Keras tensor
162 (i.e. output of `layers.Input()`)
163 to use as image input for the model.
164 input_shape: Optional shape tuple, only to be specified
165 if `include_top` is False.
166 It should have exactly 3 inputs channels.
167 pooling: Optional pooling mode for feature extraction
168 when `include_top` is `False`. Defaults to None.
169 - `None` means that the output of the model will be
170 the 4D tensor output of the
171 last convolutional layer.
172 - `avg` means that global average pooling
173 will be applied to the output of the
174 last convolutional layer, and thus
175 the output of the model will be a 2D tensor.
176 - `max` means that global max pooling will
177 be applied.
178 classes: Optional number of classes to classify images
179 into, only to be specified if `include_top` is True, and
180 if no `weights` argument is specified. Defaults to 1000 (number of
181 ImageNet classes).
182 classifier_activation: A `str` or callable. The activation function to use
183 on the "top" layer. Ignored unless `include_top=True`. Set
184 `classifier_activation=None` to return the logits of the "top" layer.
185 Defaults to 'softmax'.
186 Returns:
187 A `keras.Model` instance.
188 """
189
190
191 def EfficientNet(
192 width_coefficient,
193 depth_coefficient,
194 default_size,
195 dropout_rate=0.2,
196 drop_connect_rate=0.2,
197 depth_divisor=8,
198 activation='swish',
199 blocks_args='default',
200 model_name='efficientnet',
201 include_top=True,
202 weights='imagenet',
203 input_tensor=None,
204 input_shape=None,
205 pooling=None,
206 classes=1000,
207 classifier_activation='softmax'):
208 """Instantiates the EfficientNet architecture using given scaling coefficients.
209 Reference:
210 - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
211 https://arxiv.org/abs/1905.11946) (ICML 2019)
212 Optionally loads weights pre-trained on ImageNet.
213 Note that the data format convention used by the model is
214 the one specified in your Keras config at `~/.keras/keras.json`.
215 Arguments:
216 width_coefficient: float, scaling coefficient for network width.
217 depth_coefficient: float, scaling coefficient for network depth.
218 default_size: integer, default input image size.
219 dropout_rate: float, dropout rate before final classifier layer.
220 drop_connect_rate: float, dropout rate at skip connections.
221 depth_divisor: integer, a unit of network width.
222 activation: activation function.
223 blocks_args: list of dicts, parameters to construct block modules.
224 model_name: string, model name.
225 include_top: whether to include the fully-connected
226 layer at the top of the network.
227 weights: one of `None` (random initialization),
228 'imagenet' (pre-training on ImageNet),
229 or the path to the weights file to be loaded.
230 input_tensor: optional Keras tensor
231 (i.e. output of `layers.Input()`)
232 to use as image input for the model.
233 input_shape: optional shape tuple, only to be specified
234 if `include_top` is False.
235 It should have exactly 3 inputs channels.
236 pooling: optional pooling mode for feature extraction
237 when `include_top` is `False`.
238 - `None` means that the output of the model will be
239 the 4D tensor output of the
240 last convolutional layer.
241 - `avg` means that global average pooling
242 will be applied to the output of the
243 last convolutional layer, and thus
244 the output of the model will be a 2D tensor.
245 - `max` means that global max pooling will
246 be applied.
247 classes: optional number of classes to classify images
248 into, only to be specified if `include_top` is True, and
249 if no `weights` argument is specified.
250 classifier_activation: A `str` or callable. The activation function to use
251 on the "top" layer. Ignored unless `include_top=True`. Set
252 `classifier_activation=None` to return the logits of the "top" layer.
253 Returns:
254 A `keras.Model` instance.
255 Raises:
256 ValueError: in case of invalid argument for `weights`,
257 or invalid input shape.
258 ValueError: if `classifier_activation` is not `softmax` or `None` when
259 using a pretrained top layer.
260 """
261 if blocks_args == 'default':
262 blocks_args = DEFAULT_BLOCKS_ARGS
263
264 if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
265 raise ValueError('The `weights` argument should be either '
266 '`None` (random initialization), `imagenet` '
267 '(pre-training on ImageNet), '
268 'or the path to the weights file to be loaded.')
269
270 if weights == 'imagenet' and include_top and classes != 1000:
271 raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
272 ' as true, `classes` should be 1000')
273
274 # Determine proper input shape
275 input_shape = imagenet_utils.obtain_input_shape(
276 input_shape,
277 default_size=default_size,
278 min_size=32,
279 data_format=backend.image_data_format(),
280 require_flatten=include_top,
281 weights=weights)
282
283 if input_tensor is None:
284 img_input = layers.Input(shape=input_shape)
285 else:
286 if not backend.is_keras_tensor(input_tensor):
287 img_input = layers.Input(tensor=input_tensor, shape=input_shape)
288 else:
289 img_input = input_tensor
290
291 bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
292
293 def round_filters(filters, divisor=depth_divisor):
294 """Round number of filters based on depth multiplier."""
295 filters *= width_coefficient
296 new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
297 # Make sure that round down does not go down by more than 10%.
298 if new_filters < 0.9 * filters:
299 new_filters += divisor
300 return int(new_filters)
301
302 def round_repeats(repeats):
303 """Round number of repeats based on depth multiplier."""
304 return int(math.ceil(depth_coefficient * repeats))
305
306 # Build stem
307 x = img_input
308 x = layers.experimental.preprocessing.Rescaling(1. / 255.)(x)
309 x = layers.experimental.preprocessing.Normalization(axis=bn_axis)(x)
310
311 x = layers.ZeroPadding2D(
312 padding=imagenet_utils.correct_pad(x, 3),
313 name='stem_conv_pad')(x)
314 x = layers.Conv2D(
315 round_filters(32),
316 3,
317 strides=2,
318 padding='valid',
319 use_bias=False,
320 kernel_initializer=CONV_KERNEL_INITIALIZER,
321 name='stem_conv')(x)
322 x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
323 x = layers.Activation(activation, name='stem_activation')(x)
324
325 # Build blocks
326 blocks_args = copy.deepcopy(blocks_args)
327
328 b = 0
329 blocks = float(sum(round_repeats(args['repeats']) for args in blocks_args))
330 for (i, args) in enumerate(blocks_args):
331 assert args['repeats'] > 0
332 # Update block input and output filters based on depth multiplier.
333 args['filters_in'] = round_filters(args['filters_in'])
334 args['filters_out'] = round_filters(args['filters_out'])
335
336 for j in range(round_repeats(args.pop('repeats'))):
337 # The first block needs to take care of stride and filter size increase.
338 if j > 0:
339 args['strides'] = 1
340 args['filters_in'] = args['filters_out']
341 x = block(
342 x,
343 activation,
344 drop_connect_rate * b / blocks,
345 name='block{}{}_'.format(i + 1, chr(j + 97)),
346 **args)
347 b += 1
348
349 # Build top
350 x = layers.Conv2D(
351 round_filters(1280),
352 1,
353 padding='same',
354 use_bias=False,
355 kernel_initializer=CONV_KERNEL_INITIALIZER,
356 name='top_conv')(x)
357 x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
358 x = layers.Activation(activation, name='top_activation')(x)
359 if include_top:
360 x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
361 if dropout_rate > 0:
362 x = layers.Dropout(dropout_rate, name='top_dropout')(x)
363 imagenet_utils.validate_activation(classifier_activation, weights)
364 x = layers.Dense(
365 classes,
366 activation=classifier_activation,
367 kernel_initializer=DENSE_KERNEL_INITIALIZER,
368 name='predictions')(x)
369 else:
370 if pooling == 'avg':
371 x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
372 elif pooling == 'max':
373 x = layers.GlobalMaxPooling2D(name='max_pool')(x)
374
375 # Ensure that the model takes into account
376 # any potential predecessors of `input_tensor`.
377 if input_tensor is not None:
378 inputs = layer_utils.get_source_inputs(input_tensor)
379 else:
380 inputs = img_input
381
382 # Create model.
383 model = training.Model(inputs, x, name=model_name)
384
385 # Load weights.
386 if weights == 'imagenet':
387 if include_top:
388 file_suffix = '.h5'
389 file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
390 else:
391 file_suffix = '_notop.h5'
392 file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
393 file_name = model_name + file_suffix
394 weights_path = data_utils.get_file(
395 file_name,
396 BASE_WEIGHTS_PATH + file_name,
397 cache_subdir='models',
398 file_hash=file_hash)
399 model.load_weights(weights_path)
400 elif weights is not None:
401 model.load_weights(weights)
402 return model
403
404
405 def block(inputs,
406 activation='swish',
407 drop_rate=0.,
408 name='',
409 filters_in=32,
410 filters_out=16,
411 kernel_size=3,
412 strides=1,
413 expand_ratio=1,
414 se_ratio=0.,
415 id_skip=True):
416 """An inverted residual block.
417 Arguments:
418 inputs: input tensor.
419 activation: activation function.
420 drop_rate: float between 0 and 1, fraction of the input units to drop.
421 name: string, block label.
422 filters_in: integer, the number of input filters.
423 filters_out: integer, the number of output filters.
424 kernel_size: integer, the dimension of the convolution window.
425 strides: integer, the stride of the convolution.
426 expand_ratio: integer, scaling coefficient for the input filters.
427 se_ratio: float between 0 and 1, fraction to squeeze the input filters.
428     id_skip: boolean, whether to add an identity shortcut when strides == 1 and the input and output filters match.
429 Returns:
430 output tensor for the block.
431 """
432 bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
433
434 # Expansion phase
435 filters = filters_in * expand_ratio
436 if expand_ratio != 1:
437 x = layers.Conv2D(
438 filters,
439 1,
440 padding='same',
441 use_bias=False,
442 kernel_initializer=CONV_KERNEL_INITIALIZER,
443 name=name + 'expand_conv')(
444 inputs)
445 x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
446 x = layers.Activation(activation, name=name + 'expand_activation')(x)
447 else:
448 x = inputs
449
450 # Depthwise Convolution
451 if strides == 2:
452 x = layers.ZeroPadding2D(
453 padding=imagenet_utils.correct_pad(x, kernel_size),
454 name=name + 'dwconv_pad')(x)
455 conv_pad = 'valid'
456 else:
457 conv_pad = 'same'
458 x = layers.DepthwiseConv2D(
459 kernel_size,
460 strides=strides,
461 padding=conv_pad,
462 use_bias=False,
463 depthwise_initializer=CONV_KERNEL_INITIALIZER,
464 name=name + 'dwconv')(x)
465 x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
466 x = layers.Activation(activation, name=name + 'activation')(x)
467
468 # Squeeze and Excitation phase
469 if 0 < se_ratio <= 1:
470 filters_se = max(1, int(filters_in * se_ratio))
471 se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
472 se = layers.Reshape((1, 1, filters), name=name + 'se_reshape')(se)
473 se = layers.Conv2D(
474 filters_se,
475 1,
476 padding='same',
477 activation=activation,
478 kernel_initializer=CONV_KERNEL_INITIALIZER,
479 name=name + 'se_reduce')(
480 se)
481 se = layers.Conv2D(
482 filters,
483 1,
484 padding='same',
485 activation='sigmoid',
486 kernel_initializer=CONV_KERNEL_INITIALIZER,
487 name=name + 'se_expand')(se)
488 x = layers.multiply([x, se], name=name + 'se_excite')
489
490 # Output phase
491 x = layers.Conv2D(
492 filters_out,
493 1,
494 padding='same',
495 use_bias=False,
496 kernel_initializer=CONV_KERNEL_INITIALIZER,
497 name=name + 'project_conv')(x)
498 x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
499 if id_skip and strides == 1 and filters_in == filters_out:
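    # Stochastic depth (descriptive note): Dropout with noise_shape=(None, 1, 1, 1)
    # zeroes the entire residual branch for a given example rather than individual
    # activations, so during training the block randomly collapses to its identity shortcut.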
500 if drop_rate > 0:
501 x = layers.Dropout(
502 drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x)
503 x = layers.add([x, inputs], name=name + 'add')
504 return x
505
506
507 @keras_export('keras.applications.efficientnet.EfficientNetB0',
508 'keras.applications.EfficientNetB0')
509 def EfficientNetB0(include_top=True,
510 weights='imagenet',
511 input_tensor=None,
512 input_shape=None,
513 pooling=None,
514 classes=1000,
515 classifier_activation='softmax',
516 **kwargs):
517 return EfficientNet(
518 1.0,
519 1.0,
520 224,
521 0.2,
522 model_name='efficientnetb0',
523 include_top=include_top,
524 weights=weights,
525 input_tensor=input_tensor,
526 input_shape=input_shape,
527 pooling=pooling,
528 classes=classes,
529 classifier_activation=classifier_activation,
530 **kwargs)
531
532
533 @keras_export('keras.applications.efficientnet.EfficientNetB1',
534 'keras.applications.EfficientNetB1')
535 def EfficientNetB1(include_top=True,
536 weights='imagenet',
537 input_tensor=None,
538 input_shape=None,
539 pooling=None,
540 classes=1000,
541 classifier_activation='softmax',
542 **kwargs):
543 return EfficientNet(
544 1.0,
545 1.1,
546 240,
547 0.2,
548 model_name='efficientnetb1',
549 include_top=include_top,
550 weights=weights,
551 input_tensor=input_tensor,
552 input_shape=input_shape,
553 pooling=pooling,
554 classes=classes,
555 classifier_activation=classifier_activation,
556 **kwargs)
557
558
559 @keras_export('keras.applications.efficientnet.EfficientNetB2',
560 'keras.applications.EfficientNetB2')
561 def EfficientNetB2(include_top=True,
562 weights='imagenet',
563 input_tensor=None,
564 input_shape=None,
565 pooling=None,
566 classes=1000,
567 classifier_activation='softmax',
568 **kwargs):
569 return EfficientNet(
570 1.1,
571 1.2,
572 260,
573 0.3,
574 model_name='efficientnetb2',
575 include_top=include_top,
576 weights=weights,
577 input_tensor=input_tensor,
578 input_shape=input_shape,
579 pooling=pooling,
580 classes=classes,
581 classifier_activation=classifier_activation,
582 **kwargs)
583
584
585 @keras_export('keras.applications.efficientnet.EfficientNetB3',
586 'keras.applications.EfficientNetB3')
587 def EfficientNetB3(include_top=True,
588 weights='imagenet',
589 input_tensor=None,
590 input_shape=None,
591 pooling=None,
592 classes=1000,
593 classifier_activation='softmax',
594 **kwargs):
595 return EfficientNet(
596 1.2,
597 1.4,
598 300,
599 0.3,
600 model_name='efficientnetb3',
601 include_top=include_top,
602 weights=weights,
603 input_tensor=input_tensor,
604 input_shape=input_shape,
605 pooling=pooling,
606 classes=classes,
607 classifier_activation=classifier_activation,
608 **kwargs)
609
610
611 @keras_export('keras.applications.efficientnet.EfficientNetB4',
612 'keras.applications.EfficientNetB4')
613 def EfficientNetB4(include_top=True,
614 weights='imagenet',
615 input_tensor=None,
616 input_shape=None,
617 pooling=None,
618 classes=1000,
619 classifier_activation='softmax',
620 **kwargs):
621 return EfficientNet(
622 1.4,
623 1.8,
624 380,
625 0.4,
626 model_name='efficientnetb4',
627 include_top=include_top,
628 weights=weights,
629 input_tensor=input_tensor,
630 input_shape=input_shape,
631 pooling=pooling,
632 classes=classes,
633 classifier_activation=classifier_activation,
634 **kwargs)
635
636
637 @keras_export('keras.applications.efficientnet.EfficientNetB5',
638 'keras.applications.EfficientNetB5')
639 def EfficientNetB5(include_top=True,
640 weights='imagenet',
641 input_tensor=None,
642 input_shape=None,
643 pooling=None,
644 classes=1000,
645 classifier_activation='softmax',
646 **kwargs):
647 return EfficientNet(
648 1.6,
649 2.2,
650 456,
651 0.4,
652 model_name='efficientnetb5',
653 include_top=include_top,
654 weights=weights,
655 input_tensor=input_tensor,
656 input_shape=input_shape,
657 pooling=pooling,
658 classes=classes,
659 classifier_activation=classifier_activation,
660 **kwargs)
661
662
663 @keras_export('keras.applications.efficientnet.EfficientNetB6',
664 'keras.applications.EfficientNetB6')
665 def EfficientNetB6(include_top=True,
666 weights='imagenet',
667 input_tensor=None,
668 input_shape=None,
669 pooling=None,
670 classes=1000,
671 classifier_activation='softmax',
672 **kwargs):
673 return EfficientNet(
674 1.8,
675 2.6,
676 528,
677 0.5,
678 model_name='efficientnetb6',
679 include_top=include_top,
680 weights=weights,
681 input_tensor=input_tensor,
682 input_shape=input_shape,
683 pooling=pooling,
684 classes=classes,
685 classifier_activation=classifier_activation,
686 **kwargs)
687
688
689 @keras_export('keras.applications.efficientnet.EfficientNetB7',
690 'keras.applications.EfficientNetB7')
691 def EfficientNetB7(include_top=True,
692 weights='imagenet',
693 input_tensor=None,
694 input_shape=None,
695 pooling=None,
696 classes=1000,
697 classifier_activation='softmax',
698 **kwargs):
699 return EfficientNet(
700 2.0,
701 3.1,
702 600,
703 0.5,
704 model_name='efficientnetb7',
705 include_top=include_top,
706 weights=weights,
707 input_tensor=input_tensor,
708 input_shape=input_shape,
709 pooling=pooling,
710 classes=classes,
711 classifier_activation=classifier_activation,
712 **kwargs)
713
714
715 EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB0')
716 EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB1')
717 EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB2')
718 EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB3')
719 EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB4')
720 EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB5')
721 EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB6')
722 EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB7')
723
724
725 @keras_export('keras.applications.efficientnet.preprocess_input')
726 def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
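  # Intentional no-op (descriptive note): EfficientNet embeds its own Rescaling and
  # Normalization layers in the stem, so inputs are expected as raw pixels in [0, 255].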
727 return x
728
729
730 @keras_export('keras.applications.efficientnet.decode_predictions')
731 def decode_predictions(preds, top=5):
732 return imagenet_utils.decode_predictions(preds, top=top)
733
734
735 decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
...\ No newline at end of file ...\ No newline at end of file
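A minimal usage sketch for the vendored EfficientNet above. The flat import path `efficientnet` is an assumption; adjust it to wherever this file lives in the package. The constructor arguments come from the code above.

```python
import numpy as np
from efficientnet import EfficientNetB0  # assumed import path for the file above

# Headless backbone: global-average-pooled 1280-d features per image.
model = EfficientNetB0(include_top=False, weights=None, pooling='avg',
                       input_shape=(224, 224, 3))

# Raw pixels in [0, 255]; rescaling/normalization happen inside the model stem.
batch = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype('float32')
features = model.predict(batch)
print(features.shape)  # (1, 1280)
```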
1 from . import angle_detector
...\ No newline at end of file ...\ No newline at end of file
1 # -*- coding: utf-8 -*-
2 # @Author : lk
3 # @Email : 9428.al@gmail.com
4 # @Created Date : 2019-09-03 15:40:54
5 # @Last Modified : 2022-07-18 16:10:36
6 # @Description :
7
8 import os
9 import cv2
10 import time
11 import numpy as np
12 # import tensorflow as tf
13
14 # import grpc
15 # from tensorflow_serving.apis import predict_pb2
16 # from tensorflow_serving.apis import prediction_service_pb2_grpc
17
18 import tritonclient.grpc as grpcclient
19
20
21 def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
22 '''
23     Resize the input image to the given width or height while keeping its aspect ratio.
24 '''
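    # e.g. for image.shape == (2048, 1024, 3), resize(image, width=768) keeps the
    # aspect ratio and returns an array of shape (1536, 768, 3)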
25 dim = None
26 (h, w) = image.shape[:2]
27
28 # if both the width and height are None, then return the original image
29 if width is None and height is None:
30 return image
31
32 # check to see if the width is None
33 if width is None:
34 # calculate the ratio of the height and construct the dimensions
35 r = height / float(h)
36 dim = (int(w * r), height)
37
38 # otherwise, the height is None
39 else:
40 # calculate the ratio of the width and construct the dimensions
41 r = width / float(w)
42 dim = (width, int(h * r))
43
44 # resize the image
45 resized = cv2.resize(image, dim, interpolation=inter)
46
47 return resized
48
49 def predict(image):
50
51 ROTATE = [0, 90, 180, 270]
52
53 # pre-process the image for classification
54     # Test 1: resize directly to the target size
55     # image = cv2.resize(image, (512, 512))
56 
57     # Test 2: resize the short side to the target size and scale the long side proportionally
58     short_side = 768
59     if min(image.shape[:2]) > short_side:
60         image = resize(image, width=short_side) if image.shape[0] > image.shape[1] else resize(image, height=short_side)
61 
62     # Test 3: a resize strategy that pads to the target size
63     # image = resize_image_with_pad(image, 1024, 1024)
64 
65     # Test 4: use the original image directly
66     # image = image
67
68 image = np.array(image, dtype="float32")
69     image = 2 * (image / 255.0) - 1  # normalize the input to the [-1, 1] range
70 input_data = np.expand_dims(image, 0)
71
72 # options = [('grpc.max_send_message_length', 1000 * 1024 * 1024),
73 # ('grpc.max_receive_message_length', 1000 * 1024 * 1024)]
74 # channel = grpc.insecure_channel('localhost:8500', options=options)
75 # stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
76
77 # request = predict_pb2.PredictRequest()
78 # request.model_spec.name = 'adc_model'
79 # request.model_spec.signature_name = 'serving_default'
80 # request.inputs['input_1'].CopyFrom(tf.make_tensor_proto(inputs))
81
82 # result = stub.Predict(request, 100.0) # 100 secs timeout
83
84 # preds = tf.make_ndarray(result.outputs['dense'])
85
86 triton_client = grpcclient.InferenceServerClient("localhost:8001")
87
88 # Initialize the data
89     inputs = [grpcclient.InferInput('input_1', input_data.shape, "FP32")]  # InferInput describes one input tensor of the inference request
90     inputs[0].set_data_from_numpy(input_data)  # attach the tensor data from the numpy array to this input
91 outputs = [grpcclient.InferRequestedOutput("dense")]
92
93 # Inference
94 results = triton_client.infer(
95 model_name="adc_model",
96 inputs=inputs,
97 outputs=outputs
98 )
99 # Get the output arrays from the results
100 preds = results.as_numpy("dense")
101
102 index = np.argmax(preds, axis=-1)[0]
103
104 return index
105 # return ROTATE[index]
106
107 def DegreeTrans(theta):
108 '''
109     Convert radians to degrees
110 '''
111 res = theta / np.pi * 180
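    # e.g. DegreeTrans(np.pi / 2) == 90.0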
112 return res
113
114 def rotateImage(src, degree):
115 '''
116 Calculate the rotation matrix and rotate the image
117     param src: image already rotated by a multiple of 90 degrees
118     param degree: the fine skew angle (in degrees) estimated by the Hough transform
119 '''
120 h, w = src.shape[:2]
121 RotateMatrix = cv2.getRotationMatrix2D((w/2.0, h/2.0), degree, 1)
122     # affine transformation; fill the background with white
123 rotate = cv2.warpAffine(src, RotateMatrix, (w, h), borderValue=(255, 255, 255))
124 return rotate
125
126 def CalcDegree(srcImage):
127 '''
128     Estimate the skew angle of the image via the Hough transform
129     param srcImage: image already rotated by a multiple of 90 degrees
130 '''
131 midImage = cv2.cvtColor(srcImage, cv2.COLOR_BGR2GRAY)
132 dstImage = cv2.Canny(midImage, 100, 300, 3)
133 lineimage = srcImage.copy()
134
135     # Detect straight lines with the Hough transform.
136     # The 4th parameter (th) is the accumulator threshold: the larger it is, the higher the detection precision.
137 th = 500
138 while True:
139 if th > 0:
140 lines = cv2.HoughLines(dstImage, 1, np.pi/180, th)
141 else:
142 lines = None
143 break
144 if lines is not None:
145 if len(lines) > 10:
146 break
147 else:
148 th -= 50
149                 # print('threshold:', th)
150 else:
151 th -= 100
152             # print('threshold:', th)
153 continue
154
155 sum_theta = 0
156 num_theta = 0
157 if lines is not None:
158 for i in range(len(lines)):
159 for rho, theta in lines[i]:
160                 # keep only near-horizontal text lines: theta in (1, 2.1) rad deviates from pi/2 by roughly -33 to +30 degrees
161 if theta > 1 and theta < 2.1:
162 sum_theta += theta
163 num_theta += 1
164 # Average all angles
165 if num_theta == 0:
166 average = np.pi/2
167 else:
168 average = sum_theta / num_theta
169
170 return DegreeTrans(average) - 90
171
172 def ADC(image, fine_degree=False):
173 '''
174     return rotate: the corrected image
175     return angle_degree: the detected rotation angle of the input image, in degrees
176 '''
177
178     # Predict the coarse orientation index (a multiple of 90 degrees)
179 img = np.copy(image)
180 angle_index = predict(img)
181 img_rot = np.rot90(img, -angle_index)
182
183     # If fine_degree is set, the image is additionally deskewed based on its text-line features.
184 if fine_degree:
185 degree = CalcDegree(img_rot)
186 angle_degree = (angle_index * 90 - degree) % 360
187 rotate = rotateImage(img_rot, degree)
188 return rotate, angle_degree
189
190 return img_rot, int(angle_index*90)
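A minimal usage sketch of this module. Assumptions: the Triton server from the Prerequisites section is already serving `adc_model` on localhost:8001 (as `predict` above requires), and the image path is illustrative only.

```python
import cv2
from turnsole.ocr_engine import angle_detector

image = cv2.imread('/path/to/scan.jpg')  # illustrative path
# Fine mode: rotate by a multiple of 90 degrees first, then deskew the
# residual angle estimated via the Hough transform.
rotated, angle_degree = angle_detector.ADC(image, fine_degree=True)
print(angle_degree)
```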
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-07-28 19:40:10
5 # @Last Modified : 2022-09-08 18:00:40
6 # @Description :
7
8 from .text_rec import textRecServer
9
10 text_recognizer = textRecServer()
1 alphabet = """ \
[... CRNN character set, elided: one character per line joined with backslash continuations, 6,478+ entries in this truncated view (surviving samples include 亿, 广, 西, 访, 鹿, 绿, 稿, 驿); most glyphs did not survive extraction and are not reconstructed here ...]
6479 \
6480 \
6481 \
6482 \
6483 \
6484 \
6485 \
6486 \
6487 \
6488 \
6489 \
6490 \
6491 \
6492 \
6493 \
6494 \
6495 \
6496 \
6497 \
6498 \
6499 \
6500 \
6501 \
6502 \
6503 \
6504 \
6505 \
6506 \
6507 𠙶\
6508 \
6509 \
6510 \
6511 \
6512 \
6513 \
6514 \
6515 \
6516 氿\
6517 \
6518 \
6519 \
6520 \
6521 \
6522 \
6523 \
6524 \
6525 \
6526 \
6527 \
6528 \
6529 \
6530 𨙸\
6531 \
6532 \
6533 \
6534 \
6535 \
6536 辿\
6537 \
6538 \
6539 \
6540 \
6541 \
6542 \
6543 \
6544 \
6545 \
6546 \
6547 \
6548 \
6549 \
6550 \
6551 \
6552 \
6553 \
6554 \
6555 \
6556 \
6557 \
6558 \
6559 \
6560 \
6561 \
6562 \
6563 \
6564 \
6565 \
6566 \
6567 \
6568 \
6569 \
6570 \
6571 \
6572 \
6573 \
6574 \
6575 \
6576 \
6577 \
6578 \
6579 \
6580 \
6581 \
6582 \
6583 \
6584 \
6585 \
6586 \
6587 \
6588 \
6589 \
6590 \
6591 \
6592 \
6593 \
6594 \
6595 \
6596 \
6597 \
6598 \
6599 \
6600 \
6601 \
6602 \
6603 \
6604 \
6605 \
6606 \
6607 \
6608 \
6609 \
6610 \
6611 \
6612 \
6613 𣲘\
6614 𣲗\
6615 \
6616 \
6617 \
6618 \
6619 \
6620 \
6621 \
6622 \
6623 \
6624 \
6625 \
6626 \
6627 \
6628 \
6629 \
6630 \
6631 \
6632 \
6633 \
6634 \
6635 \
6636 \
6637 \
6638 𨚕\
6639 \
6640 \
6641 \
6642 \
6643 \
6644 \
6645 \
6646 \
6647 \
6648 \
6649 \
6650 \
6651 \
6652 \
6653 \
6654 \
6655 \
6656 \
6657 \
6658 \
6659 𦭜\
6660 \
6661 \
6662 \
6663 \
6664 \
6665 \
6666 \
6667 \
6668 \
6669 \
6670 \
6671 \
6672 \
6673 \
6674 \
6675 \
6676 \
6677 \
6678 \
6679 \
6680 \
6681 \
6682 \
6683 \
6684 \
6685 \
6686 \
6687 \
6688 \
6689 \
6690 \
6691 \
6692 \
6693 \
6694 \
6695 \
6696 \
6697 \
6698 \
6699 \
6700 \
6701 \
6702 \
6703 \
6704 \
6705 \
6706 \
6707 \
6708 \
6709 \
6710 \
6711 \
6712 \
6713 \
6714 \
6715 \
6716 \
6717 \
6718 \
6719 \
6720 \
6721 \
6722 \
6723 \
6724 \
6725 \
6726 \
6727 \
6728 \
6729 \
6730 \
6731 \
6732 \
6733 \
6734 \
6735 \
6736 \
6737 \
6738 \
6739 \
6740 \
6741 \
6742 \
6743 \
6744 \
6745 \
6746 \
6747 \
6748 \
6749 \
6750 \
6751 \
6752 \
6753 𫠊\
6754 \
6755 \
6756 \
6757 \
6758 \
6759 \
6760 \
6761 \
6762 \
6763 \
6764 \
6765 \
6766 \
6767 \
6768 \
6769 \
6770 \
6771 \
6772 \
6773 \
6774 \
6775 \
6776 \
6777 \
6778 \
6779 \
6780 \
6781 \
6782 \
6783 \
6784 \
6785 \
6786 \
6787 \
6788 \
6789 \
6790 \
6791 \
6792 \
6793 \
6794 \
6795 𦰡\
6796 \
6797 \
6798 \
6799 \
6800 \
6801 \
6802 \
6803 \
6804 \
6805 \
6806 \
6807 \
6808 \
6809 \
6810 \
6811 \
6812 \
6813 \
6814 \
6815 \
6816 \
6817 \
6818 \
6819 \
6820 \
6821 \
6822 \
6823 \
6824 \
6825 \
6826 \
6827 \
6828 \
6829 \
6830 \
6831 \
6832 \
6833 \
6834 \
6835 \
6836 \
6837 \
6838 \
6839 \
6840 \
6841 \
6842 \
6843 \
6844 \
6845 \
6846 \
6847 \
6848 \
6849 \
6850 \
6851 \
6852 \
6853 \
6854 \
6855 \
6856 \
6857 \
6858 \
6859 \
6860 \
6861 \
6862 \
6863 \
6864 \
6865 \
6866 \
6867 \
6868 𦙶\
6869 \
6870 \
6871 \
6872 \
6873 \
6874 \
6875 \
6876 \
6877 \
6878 \
6879 \
6880 \
6881 \
6882 \
6883 \
6884 洿\
6885 \
6886 \
6887 \
6888 \
6889 \
6890 \
6891 \
6892 \
6893 \
6894 \
6895 \
6896 \
6897 \
6898 \
6899 \
6900 \
6901 \
6902 \
6903 \
6904 \
6905 \
6906 \
6907 \
6908 \
6909 \
6910 \
6911 \
6912 \
6913 \
6914 \
6915 \
6916 \
6917 \
6918 \
6919 \
6920 \
6921 \
6922 \
6923 \
6924 \
6925 \
6926 \
6927 \
6928 \
6929 \
6930 \
6931 \
6932 \
6933 \
6934 \
6935 \
6936 \
6937 \
6938 \
6939 \
6940 \
6941 \
6942 \
6943 \
6944 \
6945 \
6946 \
6947 \
6948 \
6949 \
6950 \
6951 \
6952 \
6953 \
6954 \
6955 \
6956 \
6957 \
6958 \
6959 \
6960 \
6961 \
6962 \
6963 \
6964 \
6965 \
6966 \
6967 \
6968 \
6969 \
6970 \
6971 \
6972 \
6973 \
6974 \
6975 \
6976 \
6977 𨐈\
6978 \
6979 \
6980 \
6981 \
6982 \
6983 \
6984 \
6985 \
6986 \
6987 \
6988 \
6989 \
6990 \
6991 \
6992 \
6993 \
6994 \
6995 \
6996 \
6997 \
6998 \
6999 峿\
7000 \
7001 \
7002 \
7003 \
7004 \
7005 \
7006 \
7007 \
7008 \
7009 \
7010 \
7011 \
7012 \
7013 \
7014 \
7015 \
7016 \
7017 \
7018 \
7019 \
7020 \
7021 \
7022 \
7023 \
7024 \
7025 \
7026 \
7027 \
7028 \
7029 \
7030 \
7031 \
7032 \
7033 \
7034 \
7035 \
7036 \
7037 \
7038 \
7039 \
7040 \
7041 \
7042 \
7043 \
7044 \
7045 \
7046 \
7047 \
7048 \
7049 \
7050 \
7051 \
7052 \
7053 \
7054 \
7055 \
7056 \
7057 \
7058 \
7059 \
7060 \
7061 \
7062 \
7063 \
7064 \
7065 \
7066 \
7067 \
7068 \
7069 \
7070 \
7071 \
7072 \
7073 \
7074 \
7075 \
7076 \
7077 \
7078 \
7079 \
7080 \
7081 \
7082 \
7083 \
7084 \
7085 𨺙\
7086 \
7087 \
7088 \
7089 \
7090 \
7091 \
7092 \
7093 \
7094 \
7095 \
7096 \
7097 \
7098 \
7099 \
7100 \
7101 \
7102 \
7103 \
7104 \
7105 \
7106 \
7107 \
7108 \
7109 \
7110 \
7111 \
7112 \
7113 \
7114 \
7115 \
7116 \
7117 \
7118 \
7119 \
7120 \
7121 \
7122 \
7123 \
7124 \
7125 \
7126 \
7127 \
7128 \
7129 \
7130 \
7131 \
7132 \
7133 \
7134 \
7135 \
7136 \
7137 \
7138 \
7139 \
7140 \
7141 \
7142 \
7143 \
7144 \
7145 \
7146 \
7147 \
7148 \
7149 \
7150 \
7151 \
7152 \
7153 \
7154 \
7155 \
7156 \
7157 \
7158 \
7159 \
7160 \
7161 \
7162 \
7163 \
7164 \
7165 \
7166 \
7167 \
7168 \
7169 \
7170 \
7171 \
7172 \
7173 \
7174 \
7175 \
7176 \
7177 \
7178 \
7179 \
7180 \
7181 \
7182 \
7183 \
7184 \
7185 \
7186 \
7187 \
7188 \
7189 \
7190 \
7191 \
7192 \
7193 \
7194 \
7195 \
7196 \
7197 \
7198 \
7199 \
7200 \
7201 \
7202 \
7203 \
7204 \
7205 \
7206 \
7207 \
7208 \
7209 \
7210 \
7211 \
7212 \
7213 \
7214 \
7215 \
7216 \
7217 \
7218 \
7219 \
7220 𠅤\
7221 \
7222 \
7223 \
7224 \
7225 \
7226 \
7227 \
7228 \
7229 \
7230 \
7231 \
7232 \
7233 \
7234 \
7235 \
7236 \
7237 \
7238 \
7239 \
7240 \
7241 \
7242 \
7243 \
7244 \
7245 \
7246 \
7247 \
7248 \
7249 \
7250 \
7251 \
7252 \
7253 \
7254 \
7255 \
7256 \
7257 \
7258 \
7259 \
7260 \
7261 \
7262 \
7263 \
7264 \
7265 \
7266 \
7267 \
7268 \
7269 \
7270 \
7271 \
7272 \
7273 \
7274 \
7275 \
7276 \
7277 \
7278 \
7279 \
7280 \
7281 \
7282 \
7283 \
7284 \
7285 \
7286 \
7287 \
7288 \
7289 \
7290 \
7291 \
7292 \
7293 \
7294 \
7295 \
7296 \
7297 \
7298 \
7299 \
7300 \
7301 𡎚\
7302 \
7303 \
7304 \
7305 \
7306 \
7307 \
7308 \
7309 \
7310 \
7311 \
7312 \
7313 \
7314 \
7315 \
7316 \
7317 \
7318 \
7319 \
7320 \
7321 \
7322 \
7323 \
7324 \
7325 \
7326 \
7327 \
7328 \
7329 \
7330 \
7331 \
7332 \
7333 \
7334 \
7335 \
7336 \
7337 \
7338 \
7339 \
7340 \
7341 \
7342 \
7343 \
7344 \
7345 \
7346 \
7347 \
7348 \
7349 \
7350 \
7351 \
7352 \
7353 \
7354 \
7355 𧿹\
7356 \
7357 \
7358 \
7359 \
7360 \
7361 \
7362 \
7363 \
7364 \
7365 崿\
7366 \
7367 \
7368 \
7369 \
7370 \
7371 \
7372 \
7373 \
7374 \
7375 \
7376 \
7377 \
7378 𨱇\
7379 \
7380 \
7381 \
7382 \
7383 \
7384 \
7385 \
7386 \
7387 \
7388 \
7389 \
7390 \
7391 \
7392 \
7393 \
7394 \
7395 \
7396 \
7397 \
7398 \
7399 \
7400 \
7401 \
7402 \
7403 \
7404 \
7405 \
7406 \
7407 \
7408 \
7409 \
7410 \
7411 \
7412 \
7413 \
7414 \
7415 \
7416 \
7417 \
7418 \
7419 \
7420 \
7421 \
7422 \
7423 \
7424 \
7425 \
7426 𣸣\
7427 \
7428 \
7429 \
7430 \
7431 \
7432 \
7433 \
7434 \
7435 \
7436 \
7437 \
7438 \
7439 \
7440 \
7441 \
7442 \
7443 \
7444 \
7445 \
7446 \
7447 \
7448 \
7449 \
7450 \
7451 \
7452 \
7453 \
7454 \
7455 \
7456 \
7457 \
7458 \
7459 \
7460 \
7461 \
7462 \
7463 \
7464 \
7465 \
7466 \
7467 \
7468 \
7469 \
7470 \
7471 𤧛\
7472 \
7473 \
7474 \
7475 \
7476 \
7477 \
7478 \
7479 \
7480 \
7481 \
7482 \
7483 \
7484 \
7485 \
7486 \
7487 \
7488 \
7489 \
7490 \
7491 \
7492 \
7493 \
7494 \
7495 \
7496 \
7497 \
7498 \
7499 \
7500 \
7501 \
7502 \
7503 \
7504 \
7505 \
7506 \
7507 \
7508 \
7509 \
7510 \
7511 \
7512 \
7513 \
7514 \
7515 \
7516 \
7517 \
7518 \
7519 \
7520 \
7521 \
7522 \
7523 \
7524 \
7525 \
7526 \
7527 \
7528 \
7529 \
7530 \
7531 \
7532 \
7533 \
7534 \
7535 \
7536 \
7537 \
7538 \
7539 \
7540 \
7541 \
7542 \
7543 \
7544 \
7545 \
7546 \
7547 \
7548 \
7549 \
7550 \
7551 \
7552 \
7553 \
7554 \
7555 \
7556 \
7557 \
7558 \
7559 \
7560 \
7561 \
7562 \
7563 \
7564 \
7565 \
7566 \
7567 \
7568 \
7569 𦝼\
7570 \
7571 \
7572 \
7573 \
7574 \
7575 \
7576 \
7577 \
7578 \
7579 \
7580 \
7581 \
7582 \
7583 \
7584 \
7585 \
7586 \
7587 \
7588 \
7589 \
7590 \
7591 \
7592 \
7593 \
7594 \
7595 \
7596 \
7597 \
7598 \
7599 \
7600 \
7601 \
7602 \
7603 \
7604 \
7605 \
7606 \
7607 \
7608 \
7609 \
7610 \
7611 \
7612 \
7613 \
7614 \
7615 \
7616 \
7617 \
7618 \
7619 \
7620 \
7621 \
7622 \
7623 \
7624 \
7625 \
7626 \
7627 \
7628 \
7629 \
7630 \
7631 \
7632 \
7633 \
7634 \
7635 \
7636 𡐓\
7637 \
7638 \
7639 \
7640 \
7641 \
7642 \
7643 \
7644 \
7645 \
7646 \
7647 \
7648 \
7649 \
7650 \
7651 𣗋\
7652 \
7653 \
7654 \
7655 \
7656 \
7657 \
7658 \
7659 \
7660 \
7661 \
7662 \
7663 \
7664 𥔲\
7665 \
7666 \
7667 \
7668 \
7669 \
7670 \
7671 \
7672 \
7673 \
7674 \
7675 \
7676 \
7677 \
7678 \
7679 𨱏\
7680 \
7681 \
7682 \
7683 \
7684 \
7685 \
7686 \
7687 \
7688 \
7689 \
7690 \
7691 \
7692 \
7693 \
7694 \
7695 \
7696 \
7697 \
7698 \
7699 \
7700 \
7701 \
7702 \
7703 \
7704 \
7705 \
7706 \
7707 \
7708 \
7709 𩽾\
7710 \
7711 \
7712 \
7713 \
7714 \
7715 \
7716 \
7717 \
7718 \
7719 \
7720 \
7721 \
7722 \
7723 \
7724 \
7725 \
7726 \
7727 \
7728 \
7729 \
7730 \
7731 \
7732 \
7733 \
7734 \
7735 \
7736 \
7737 \
7738 \
7739 \
7740 \
7741 \
7742 \
7743 \
7744 \
7745 \
7746 \
7747 \
7748 \
7749 \
7750 \
7751 \
7752 \
7753 \
7754 \
7755 \
7756 \
7757 \
7758 \
7759 \
7760 \
7761 \
7762 \
7763 \
7764 \
7765 \
7766 \
7767 \
7768 \
7769 \
7770 \
7771 \
7772 \
7773 \
7774 \
7775 \
7776 \
7777 \
7778 \
7779 \
7780 \
7781 \
7782 \
7783 \
7784 \
7785 \
7786 \
7787 \
7788 \
7789 \
7790 \
7791 \
7792 \
7793 \
7794 \
7795 \
7796 \
7797 \
7798 \
7799 \
7800 𩾃\
7801 \
7802 \
7803 \
7804 \
7805 \
7806 \
7807 \
7808 \
7809 \
7810 \
7811 \
7812 \
7813 \
7814 \
7815 \
7816 \
7817 \
7818 \
7819 \
7820 \
7821 \
7822 \
7823 \
7824 \
7825 \
7826 \
7827 \
7828 \
7829 \
7830 \
7831 \
7832 \
7833 \
7834 \
7835 \
7836 \
7837 \
7838 \
7839 \
7840 \
7841 \
7842 \
7843 \
7844 \
7845 \
7846 \
7847 \
7848 \
7849 \
7850 \
7851 \
7852 𥕢\
7853 \
7854 \
7855 \
7856 \
7857 \
7858 \
7859 \
7860 \
7861 \
7862 \
7863 \
7864 \
7865 \
7866 \
7867 \
7868 \
7869 \
7870 \
7871 \
7872 \
7873 \
7874 𨱑\
7875 \
7876 \
7877 \
7878 \
7879 \
7880 \
7881 \
7882 \
7883 \
7884 \
7885 \
7886 \
7887 \
7888 \
7889 \
7890 \
7891 \
7892 \
7893 \
7894 \
7895 \
7896 \
7897 \
7898 \
7899 \
7900 \
7901 \
7902 \
7903 \
7904 \
7905 \
7906 \
7907 \
7908 \
7909 \
7910 \
7911 \
7912 \
7913 \
7914 \
7915 \
7916 \
7917 \
7918 \
7919 𤩽\
7920 \
7921 \
7922 \
7923 \
7924 \
7925 \
7926 \
7927 \
7928 \
7929 \
7930 \
7931 \
7932 \
7933 \
7934 \
7935 \
7936 \
7937 \
7938 \
7939 \
7940 \
7941 \
7942 \
7943 \
7944 \
7945 𨱔\
7946 \
7947 \
7948 \
7949 \
7950 \
7951 \
7952 \
7953 \
7954 \
7955 \
7956 \
7957 \
7958 \
7959 \
7960 谿\
7961 \
7962 \
7963 \
7964 \
7965 鲿\
7966 \
7967 \
7968 \
7969 \
7970 \
7971 \
7972 \
7973 \
7974 \
7975 \
7976 \
7977 \
7978 \
7979 \
7980 𦈡\
7981 \
7982 \
7983 \
7984 \
7985 \
7986 \
7987 \
7988 \
7989 \
7990 \
7991 \
7992 \
7993 \
7994 \
7995 \
7996 \
7997 𥖨\
7998 \
7999 \
8000 \
8001 \
8002 \
8003 \
8004 \
8005 \
8006 \
8007 \
8008 \
8009 \
8010 \
8011 \
8012 \
8013 \
8014 \
8015 \
8016 \
8017 \
8018 \
8019 \
8020 \
8021 𦒍\
8022 \
8023 \
8024 \
8025 \
8026 \
8027 \
8028 \
8029 \
8030 \
8031 \
8032 \
8033 \
8034 \
8035 \
8036 \
8037 \
8038 \
8039 \
8040 \
8041 \
8042 \
8043 \
8044 𩾌\
8045 \
8046 \
8047 \
8048 \
8049 \
8050 \
8051 嬿\
8052 \
8053 \
8054 \
8055 \
8056 \
8057 \
8058 𨟠\
8059 \
8060 \
8061 \
8062 \
8063 \
8064 𨭉\
8065 \
8066 \
8067 \
8068 \
8069 \
8070 \
8071 \
8072 \
8073 \
8074 \
8075 \
8076 𤫉\
8077 \
8078 \
8079 \
8080 \
8081 \
8082 \
8083 \
8084 \
8085 \
8086 \
8087 \
8088 \
8089 \
8090 \
8091 \
8092 \
8093 \
8094 \
8095 \
8096 \
8097 \
8098 \
8099 \
8100 \
8101 \
8102 \
8103 \
8104 \
8105 觿\
8106 \
8107 \
8108 \
8109 \
8110 \
8111 \
8112 :\
8113 .\
8114 ,\
8115 ;\
8116 ?\
8117 \
8118 \
8119 -\
8120 )\
8121 (\
8122 \
8123 \
8124 !\
8125 [\
8126 ]\
8127 %\
8128 "\
8129 \
8130 /\
8131 \
8132 \
8133 _\
8134 =\
8135 +\
8136 \
8137 '\
8138 \
8139 \
8140 \
8141 *\
8142 \
8143 \
8144 \
8145 &\
8146 \
8147 \
8148 \
8149 ~\
8150 \
8151 #\
8152 >\
8153 {\
8154 \
8155 }\
8156 @\
8157 \
8158 |\
8159 \
8160 \\
8161 \
8162 \
8163 \
8164 \
8165 \
8166 β\
8167 $\
8168 °\
8169 \
8170 \
8171 \
8172 ±\
8173 \
8174 `\
8175 ^\
8176 ÷\
8177 \
8178 \
8179 \
8180 α\
8181 \
8182 \
8183 \
8184 <\
8185 \
8186 \
8187 П\
8188 \
8189 \
8190 \
8191 \
8192 \
8193 \
8194 ®\
8195 \
8196 \
8197 ·\
8198 0\
8199 1\
8200 2\
8201 3\
8202 4\
8203 5\
8204 6\
8205 7\
8206 8\
8207 9\
8208 a\
8209 b\
8210 c\
8211 d\
8212 e\
8213 f\
8214 g\
8215 h\
8216 i\
8217 j\
8218 k\
8219 l\
8220 m\
8221 n\
8222 o\
8223 p\
8224 q\
8225 r\
8226 s\
8227 t\
8228 u\
8229 v\
8230 w\
8231 x\
8232 y\
8233 z\
8234 A\
8235 B\
8236 C\
8237 D\
8238 E\
8239 F\
8240 G\
8241 H\
8242 I\
8243 J\
8244 K\
8245 L\
8246 M\
8247 N\
8248 O\
8249 P\
8250 Q\
8251 R\
8252 S\
8253 T\
8254 U\
8255 V\
8256 W\
8257 X\
8258 Y\
8259 Z"""
1 import cv2
2 import time
3 import numpy as np
4 from .alphabets import alphabet
5 import tritonclient.grpc as grpcclient
6
7
8 def sort_poly(p):
9 # Find the minimum coordinate using (Xi+Yi)
10 min_axis = np.argmin(np.sum(p, axis=1))
11 # Sort the box coordinates
12 p = p[[min_axis, (min_axis + 1) % 4, (min_axis + 2) % 4, (min_axis + 3) % 4]]
13 if abs(p[0, 0] - p[1, 0]) > abs(p[0, 1] - p[1, 1]):
14 return p
15 else:
16 return p[[0, 3, 2, 1]]
17
18 def client_init(url="localhost:8001",
19 ssl=False, private_key=None, root_certificates=None, certificate_chain=None,
20 verbose=False):
21 triton_client = grpcclient.InferenceServerClient(
22 url=url,
23 verbose=verbose,
24 ssl=ssl,
25 root_certificates=root_certificates,
26 private_key=private_key,
27 certificate_chain=certificate_chain)
28 return triton_client
29
30 class textRecServer:
31 """_summary_
32 """
33 def __init__(self):
34 super().__init__()
35 self.charactersS = ' ' + alphabet
36 self.batchsize = 8
37
38 self.input_name = 'INPUT__0'
39 self.output_name = 'OUTPUT__0'
40 self.model_name = 'text_rec_torch'
41 self.np_type = np.float32
42 self.quant_type = "FP32"
43 self.compression_algorithm = None
44 self.outputs = []
45 self.outputs.append(grpcclient.InferRequestedOutput(self.output_name))
46
47 def preprocess_one_image(self, image):
48 _, w, _ = image.shape
49 image = self._transform(image, w)
50 return image
51
52 def predict_batch(self, im, boxes):
53 """Summary
54
55 Args:
56 im (TYPE): RGB
57 boxes (TYPE): Description
58
59 Returns:
60 TYPE: Description
61 """
62
63 triton_client = client_init("localhost:8001")
64 count_boxes = len(boxes)
65 boxes = sorted(boxes,
66 key=lambda box: int(32.0 * (np.linalg.norm(box[0] - box[1])) / (np.linalg.norm(box[3] - box[0]))),
67 reverse=True)
68
69 results = {}
70 labels = []
71 rectime = 0.0
72 if len(boxes) != 0:
73 for i in range(len(boxes) // self.batchsize + int(len(boxes) % self.batchsize != 0)):
74 box = boxes[min(len(boxes)-1, i * self.batchsize)]
75 w, h = [int(np.linalg.norm(box[0] - box[1])), int(np.linalg.norm(box[3] - box[0]))]
76 width = max(32, min(int(32.0 * w / h), 960))
77 if width < 32:
78 continue
79 slices = []
80 for index, box in enumerate(boxes[i * self.batchsize:(i + 1) * self.batchsize]):
81 _box = [n for a in box for n in a]
82 if i * self.batchsize + index < count_boxes:
83 results[i * self.batchsize + index] = [list(map(int, _box))]
84 w, h = [int(np.linalg.norm(box[0] - box[1])), int(np.linalg.norm(box[3] - box[0]))]
85 pts1 = np.float32(box)
86 pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
87
88 # Preprocessing optimization: crop the bounding rectangle first, then warp only that slice
89 xmin, ymin, _w, _h = cv2.boundingRect(pts1)
90 xmax, ymax = xmin+_w, ymin+_h
91 xmin, ymin = max(0, xmin), max(0, ymin)
92 im_slice = im[int(ymin):int(ymax), int(xmin):int(xmax), :]
93 pts1[:, 0] -= xmin
94 pts1[:, 1] -= ymin
95
96 M = cv2.getPerspectiveTransform(pts1, pts2)
97 im_crop = cv2.warpPerspective(im_slice, M, (w, h))
98 im_crop = self._transform(im_crop, width)
99 slices.append(im_crop)
100 start_rec = time.time()
101 slices = self.np_type(slices)
102 slices = slices.transpose(0, 3, 1, 2)
103 slices = slices/127.5-1.
104 inputs = []
105 inputs.append(grpcclient.InferInput(self.input_name, list(slices.shape), self.quant_type))
106 inputs[0].set_data_from_numpy(slices)
107
108 # inference
109 preds = triton_client.infer(
110 model_name=self.model_name,
111 inputs=inputs,
112 outputs=self.outputs,
113 compression_algorithm=self.compression_algorithm
114 )
115 preds = preds.as_numpy(self.output_name).copy()
116 preds = preds.transpose(1, 0)
117 tmp_labels = self.decode(preds)
118 rectime += (time.time() - start_rec)
119 labels.extend(tmp_labels)
120
121 for index, label in enumerate(labels[:count_boxes]):
122 label = label.replace(' ', '').replace('¥', '¥')
123 if label == '':
124 del results[index]
125 continue
126 results[index].append(label)
127 # Re-order the results
128 results = list(results.values())
129 results = sorted(results, key=lambda x: x[0][1], reverse=False) # sort by y0 in ascending order
130 keys = [str(i) for i in range(len(results))]
131 results = dict(zip(keys, results))
132 else:
133 results = dict()
134 rectime = -1
135 return results, rectime
136
137 def decode(self, preds):
138 res = []
139 for t in preds:
140 length = len(t)
141 char_list = []
142 for i in range(length):
143 if t[i] != 0 and (not (i > 0 and t[i-1] == t[i])):
144 char_list.append(self.charactersS[t[i]])
145 res.append(u''.join(char_list))
146 return res
147
148 def _transform(self, im, width):
149 height=32
150
151 ori_h, ori_w = im.shape[:2]
152 ratio1 = width * 1.0 / ori_w
153 ratio2 = height * 1.0 / ori_h
154 if ratio1 < ratio2:
155 ratio = ratio1
156 else:
157 ratio = ratio2
158 new_w, new_h = int(ori_w * ratio), int(ori_h * ratio)
159 if new_w<4:
160 new_w = 4
161 im = cv2.resize(im, (new_w, new_h))
162 img = np.ones((height, width, 3), dtype=np.uint8)*230
163 img[:im.shape[0], :im.shape[1], :] = im
164 return img
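For reference, `decode` above performs greedy CTC decoding: collapse consecutive repeated indices, then drop the blank (index 0). A minimal, self-contained sketch with a toy character table; the real server indexes into `' ' + alphabet`:

```python
import numpy as np

characters = ' abc'  # toy table; index 0 is the CTC blank, as in textRecServer

def ctc_greedy_decode(pred):
    # Collapse repeated indices and drop blanks, mirroring decode() above
    chars = [characters[t] for i, t in enumerate(pred)
             if t != 0 and not (i > 0 and pred[i - 1] == t)]
    return ''.join(chars)

print(ctc_greedy_decode(np.array([1, 1, 0, 2, 0, 2, 3])))  # -> 'abbc'
```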
1 from . import text_detector
...\ No newline at end of file ...\ No newline at end of file
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-06-01 19:00:18
5 # @Last Modified : 2022-07-15 11:41:25
6 # @Description :
7
8 import os
9 import cv2
10 import time
11 import pyclipper
12 import numpy as np
13 # import tensorflow as tf
14 from shapely.geometry import Polygon
15
16 # import grpc
17 # from tensorflow_serving.apis import predict_pb2
18 # from tensorflow_serving.apis import prediction_service_pb2_grpc
19
20 import tritonclient.grpc as grpcclient
21
22
23 def resize_with_padding(src, limit_max=1024):
24 '''Resize so the long side is no larger than limit_max, scale the short side proportionally, and pad with zeros'''
25 img = src.copy()
26
27 h, w, _ = img.shape
28 max_side = max(h, w)
29 ratio = limit_max / max_side if max_side > limit_max else 1
30 h, w = int(h * ratio), int(w * ratio)
31 proc = cv2.resize(img, (w, h))
32
33 canvas = np.zeros((limit_max, limit_max, 3), dtype=np.float32)
34 canvas[0:h, 0:w, :] = proc
35 return canvas, ratio
36
37 def rectangle_boxes_zoom(boxes, offset=1):
38 '''Scale the rectangle boxes via offset
39 Input:
40 boxes: with shape (-1, 4, 2)
41 offset: how many pixels to expand on each side; we recommend less than 5
42 Output:
43 boxes: zoomed
44 '''
45 boxes = np.array(boxes)
46 boxes += [[[-offset,-offset], [offset,-offset], [offset,offset], [-offset,offset]]]
47 return boxes
48
49 def polygons_from_probmap(preds, ratio):
50 # Binarize the probability map
51 prob_map_pred = np.array(preds, dtype=np.uint8)[0,:,:,0]
52 # Inputs: binary image, contour retrieval (hierarchy) mode, contour approximation method
53 # Outputs: contours, hierarchy
54 contours, hierarchy = cv2.findContours(prob_map_pred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
55
56 boxes = []
57 for contour in contours:
58 if len(contour) < 4:
59 continue
60
61 # Vatti clipping
62 polygon = Polygon(np.array(contour).reshape((-1, 2))).buffer(0)
63 polygon = polygon.convex_hull if polygon.type == 'MultiPolygon' else polygon # Note: this is intentional, not a bug
64
65 if polygon.area < 10:
66 continue
67
68 distance = polygon.area * 1.5 / polygon.length
69 offset = pyclipper.PyclipperOffset()
70 offset.AddPath(list(polygon.exterior.coords), pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
71 expanded = np.array(offset.Execute(distance)[0]) # Note: this is intentional, not a bug
72
73 # Convert polygon to rectangle
74 rect = cv2.minAreaRect(expanded)
75 box = cv2.boxPoints(rect)
76 # make clock-wise order
77 box = np.roll(box, 4-box.sum(axis=1).argmin(), 0)
78 box = np.array(box/ratio, dtype=np.int32)
79 boxes.append(box)
80
81 return boxes
82
83 def predict(image):
84
85 image_resized, ratio = resize_with_padding(image, limit_max=1280)
86 input_data = np.expand_dims(image_resized/255., axis=0)
87
88 # options = [('grpc.max_send_message_length', 1000 * 1024 * 1024),
89 # ('grpc.max_receive_message_length', 1000 * 1024 * 1024)]
90 # channel = grpc.insecure_channel('localhost:8500', options=options)
91 # stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
92
93 # request = predict_pb2.PredictRequest()
94 # request.model_spec.name = 'dbnet_model'
95 # request.model_spec.signature_name = 'serving_default'
96 # request.inputs['input_1'].CopyFrom(tf.make_tensor_proto(inputs))
97
98 # result = stub.Predict(request, 100.0) # 100 secs timeout
99
100 # preds = tf.make_ndarray(result.outputs['tf.math.greater'])
101
102 triton_client = grpcclient.InferenceServerClient("localhost:8001")
103
104 # Initialize the data
105 inputs = [grpcclient.InferInput('input_1', input_data.shape, "FP32")]
106 inputs[0].set_data_from_numpy(input_data)
107 outputs = [grpcclient.InferRequestedOutput("tf.math.greater")]
108
109 # Inference
110 results = triton_client.infer(
111 model_name="dbnet_model",
112 inputs=inputs,
113 outputs=outputs
114 )
115 # Get the output arrays from the results
116 preds = results.as_numpy("tf.math.greater")
117
118 boxes = polygons_from_probmap(preds, ratio)
119 #boxes = rectangle_boxes_zoom(boxes, offset=0)
120
121 return boxes
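A quick usage sketch for `predict` above. It assumes the Triton container is already serving `dbnet_model` on `localhost:8001`, and `sample.jpg` is a placeholder path:

```python
import cv2
from turnsole.ocr_engine import text_detector

image = cv2.imread('sample.jpg')       # any BGR document image
boxes = text_detector.predict(image)   # list of (4, 2) int32 corner arrays
for box in boxes:
    cv2.polylines(image, [box], isClosed=True, color=(0, 255, 0), thickness=2)
cv2.imwrite('sample_with_boxes.jpg', image)
```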
1 # import the necessary packages
2 from .ADC import angle_detector
3 from .DBNet import text_detector
4 from .CRNN import text_recognizer
5 from .object_det import object_detector
6 from .signature_det import signature_detector
...\ No newline at end of file ...\ No newline at end of file
1 from .utils import ObjectDetection
2
3 object_detector = ObjectDetection()
...\ No newline at end of file ...\ No newline at end of file
1 # import grpc
2 import turnsole
3 import numpy as np
4 # import tensorflow as tf
5 # from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
6
7 import tritonclient.grpc as grpcclient
8
9
10 class ObjectDetection():
11
12 """通用文件检测算法
13 输入图片输出检测结果
14
15 API 文档请参阅:
16 """
17
18 def __init__(self, confidence_threshold=0.5):
19 """初始化检测对象
20
21 Args:
22 confidence_threshold (float, optional): 目标检测模型的分类置信度
23 """
24
25 self.label2index = {
26 'id_card_info': 0,
27 'id_card_guohui': 1,
28 'lssfz_front': 2,
29 'lssfz_back': 3,
30 'jzz_front': 4,
31 'jzz_back': 5,
32 'txz_front': 6,
33 'txz_back': 7,
34 'bank_card': 8,
35 'vehicle_license_front': 9,
36 'vehicle_license_back': 10,
37 'driving_license_front': 11,
38 'driving_license_back': 12,
39 'vrc_page_12': 13,
40 'vrc_page_34': 14,
41 }
42 self.index2label = list(self.label2index.keys())
43
44 # def resize_and_pad_to_384(self, image, jitter=True):
45 #     """Pick a random long-side length between 256 and 384 (or a fixed 384), then pad all four sides to 384
46
47 # Args:
48 # image (TYPE): An image represented as a numpy ndarray.
49 # """
50 # image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
51 # max_side = tf.random.uniform(
52 # (), 256, 384, dtype=tf.float32) if jitter else 384.
53 # ratio = max_side / tf.reduce_max(image_shape)
54 # image_shape = tf.cast(ratio * image_shape, dtype=tf.int32)
55 # image = tf.image.resize(image, image_shape)
56 # image = tf.image.pad_to_bounding_box(image, 0, 0, 384, 384)
57 # return image, ratio
58
59 def process(self, image):
60 """Processes an image and returns a list of the detected object location and classes data.
61
62 Args:
63 image (TYPE): An image represented as a numpy ndarray.
64 """
65 h, w, _ = image.shape
66 # image, ratio = self.resize_and_pad_to_384(image, jitter=False)
67 image, ratio = turnsole.resize_with_pad(image, target_height=384, target_width=384)
68 input_data = np.expand_dims(image/255., axis=0)
69
70 # options = [('grpc.max_send_message_length', 1000 * 1024 * 1024),
71 # ('grpc.max_receive_message_length', 1000 * 1024 * 1024)]
72 # channel = grpc.insecure_channel('localhost:8500', options=options)
73 # stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
74
75 # request = predict_pb2.PredictRequest()
76 # request.model_spec.name = 'object_detection'
77 # request.model_spec.signature_name = 'serving_default'
78 # request.inputs['image'].CopyFrom(tf.make_tensor_proto(inputs, dtype='float32'))
79 # # 100 secs timeout
80 # result = stub.Predict(request, 100.0)
81
82 # # saved_model_cli show --dir saved_model/ --all # inspect the saved model's inputs and outputs
83 # boxes = tf.make_ndarray(result.outputs['decode_predictions'])
84 # scores = tf.make_ndarray(result.outputs['decode_predictions_1'])
85 # classes = tf.make_ndarray(result.outputs['decode_predictions_2'])
86 # valid_detections = tf.make_ndarray(
87 # result.outputs['decode_predictions_3'])
88
89 triton_client = grpcclient.InferenceServerClient("localhost:8001")
90
91 # Initialize the data
92 inputs = [grpcclient.InferInput('image', input_data.shape, "FP32")]
93 inputs[0].set_data_from_numpy(input_data.astype('float32'))
94 outputs = [
95 grpcclient.InferRequestedOutput("decode_predictions"),
96 grpcclient.InferRequestedOutput("decode_predictions_1"),
97 grpcclient.InferRequestedOutput("decode_predictions_2"),
98 grpcclient.InferRequestedOutput("decode_predictions_3")
99 ]
100
101 # Inference
102 results = triton_client.infer(
103 model_name="object_detection",
104 inputs=inputs,
105 outputs=outputs
106 )
107 # Get the output arrays from the results
108 boxes = results.as_numpy("decode_predictions")
109 scores = results.as_numpy("decode_predictions_1")
110 classes = results.as_numpy("decode_predictions_2")
111 valid_detections = results.as_numpy("decode_predictions_3")
112
113 boxes = boxes[0][:valid_detections[0]]
114 scores = scores[0][:valid_detections[0]]
115 classes = classes[0][:valid_detections[0]]
116
117 object_list = []
118 for box, score, class_index in zip(boxes, scores, classes):
119 xmin, ymin, xmax, ymax = box / ratio
120 xmin = max(0, int(xmin))
121 ymin = max(0, int(ymin))
122 xmax = min(w, int(xmax))
123 ymax = min(h, int(ymax))
124 class_label = self.index2label[int(class_index)]
125 item = {
126 "label": class_label,
127 "confidence": float(score),
128 "location": {
129 "xmin": xmin,
130 "ymin": ymin,
131 "xmax": xmax,
132 "ymax": ymax
133 }
134 }
135 object_list.append(item)
136
137 return object_list
...\ No newline at end of file ...\ No newline at end of file
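A short usage sketch for `ObjectDetection.process`, assuming the Triton server is serving the `object_detection` model as configured above; `sample.jpg` is a placeholder path:

```python
import cv2
from turnsole.ocr_engine import object_detector

image = cv2.imread('sample.jpg')  # placeholder path
for item in object_detector.process(image):
    loc = item['location']
    print(item['label'], round(item['confidence'], 3),
          (loc['xmin'], loc['ymin'], loc['xmax'], loc['ymax']))
```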
1 # -*- coding: utf-8 -*-
2 # @Author : lk
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-06-28 14:38:57
5 # @Last Modified : 2022-09-06 14:37:47
6 # @Description :
7
8 from .utils import SignatureDetection
9
10 signature_detector = SignatureDetection()
...\ No newline at end of file ...\ No newline at end of file
1 # -*- coding: utf-8 -*-
2 # @Author : lk
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-02-08 14:10:00
5 # @Last Modified : 2022-09-06 14:45:10
6 # @Description :
7
8 import turnsole
9 import numpy as np
10 # import tensorflow as tf
11
12 # import grpc
13 # from tensorflow_serving.apis import predict_pb2
14 # from tensorflow_serving.apis import prediction_service_pb2_grpc
15
16 import tritonclient.grpc as grpcclient
17
18
19 # def resize_and_pad_to_1024(image, jitter=True):
20 # # Pick a random long-side length between 512 and 1024 (or a fixed 1024), then pad all four sides to 1024
21 # image_shape = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
22 # max_side = tf.random.uniform((), 512, 1024, dtype=tf.float32) if jitter else 1024.
23 # ratio = max_side / tf.reduce_max(image_shape)
24 # image_shape = tf.cast(ratio * image_shape, dtype=tf.int32)
25 # image = tf.image.resize(image, image_shape)
26 # image = tf.image.pad_to_bounding_box(image, 0, 0, 1024, 1024)
27 # return image, ratio
28
29 class SignatureDetection():
30
31 """签字盖章检测算法
32 输入图片输出检测结果
33
34 API 文档请参阅:
35 """
36
37 def __init__(self, confidence_threshold=0.5):
38 """初始化检测对象
39
40 Args:
41 confidence_threshold (float, optional): 目标检测模型的分类置信度
42 """
43
44 self.label2index = {
45 'circle': 0,
46 'ellipse': 1,
47 'rectangle': 2,
48 'signature': 3,
49 'qr_code': 4,
50 'bar_code': 5
51 }
52 self.index2label = {
53 0: 'circle',
54 1: 'ellipse',
55 2: 'rectangle',
56 3: 'signature',
57 4: 'qr_code',
58 5: 'bar_code'
59 }
60
61
62 def process(self, image):
63 """Processes an image and returns a list of the detected signature location and classes data.
64
65 Args:
66 image (TYPE): An image represented as a numpy ndarray.
67 """
68 h, w, _ = image.shape
69
70 # image, ratio = resize_and_pad_to_1024(image, jitter=False)
71 image, ratio = turnsole.resize_with_pad(image, target_height=1024, target_width=1024)
72 input_data = np.expand_dims(np.float32(image/255.), axis=0)
73
74 # options = [('grpc.max_send_message_length', 1000 * 1024 * 1024),
75 # ('grpc.max_receive_message_length', 1000 * 1024 * 1024)]
76 # channel = grpc.insecure_channel('localhost:8500', options=options)
77 # stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
78
79 # request = predict_pb2.PredictRequest()
80 # request.model_spec.name = 'signature_model'
81 # request.model_spec.signature_name = 'serving_default'
82 # request.inputs['image'].CopyFrom(tf.make_tensor_proto(inputs, dtype='float32'))
83 # result = stub.Predict(request, 100.0) # 100 secs timeout
84
85 # # saved_model_cli show --dir saved_model/ --all # inspect the saved model's inputs and outputs
86 # boxes = tf.make_ndarray(result.outputs['decode_predictions'])
87 # scores = tf.make_ndarray(result.outputs['decode_predictions_1'])
88 # classes = tf.make_ndarray(result.outputs['decode_predictions_2'])
89 # valid_detections = tf.make_ndarray(result.outputs['decode_predictions_3'])
90
91 triton_client = grpcclient.InferenceServerClient("localhost:8001")
92
93 # Initialize the data
94 inputs = [grpcclient.InferInput('image', input_data.shape, "FP32")]
95 inputs[0].set_data_from_numpy(input_data)
96 outputs = [
97 grpcclient.InferRequestedOutput("decode_predictions"),
98 grpcclient.InferRequestedOutput("decode_predictions_1"),
99 grpcclient.InferRequestedOutput("decode_predictions_2"),
100 grpcclient.InferRequestedOutput("decode_predictions_3")
101 ]
102
103 # Inference
104 results = triton_client.infer(
105 model_name="signature_model",
106 inputs=inputs,
107 outputs=outputs
108 )
109 # Get the output arrays from the results
110 boxes = results.as_numpy("decode_predictions")
111 scores = results.as_numpy("decode_predictions_1")
112 classes = results.as_numpy("decode_predictions_2")
113 valid_detections = results.as_numpy("decode_predictions_3")
114
115 boxes = boxes[0][:valid_detections[0]]
116 scores = scores[0][:valid_detections[0]]
117 classes = classes[0][:valid_detections[0]]
118
119 signature_list = []
120 for box, score, class_index in zip(boxes, scores, classes):
121 xmin, ymin, xmax, ymax = box / ratio
122 class_label = self.index2label[class_index]
123 item = {
124 "label": class_label,
125 "confidence": float(score),
126 "location": {
127 "xmin": max(0, int(xmin)),
128 "ymin": max(0, int(ymin)),
129 "xmax": min(w, int(xmax)),
130 "ymax": min(h, int(ymax))
131 }
132 }
133 signature_list.append(item)
134
135 return signature_list
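Usage mirrors the object detector. Note that `process` returns every valid detection and the constructor's `confidence_threshold` is never consulted, so this sketch filters client-side; the image path is a placeholder:

```python
import cv2
from turnsole.ocr_engine import signature_detector

image = cv2.imread('contract.jpg')  # placeholder path
detections = [d for d in signature_detector.process(image)
              if d['confidence'] >= 0.5]  # threshold applied manually, see note above
for d in detections:
    print(d['label'], d['location'])
```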
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-06-16 11:01:36
5 # @Last Modified : 2022-07-15 10:57:06
6 # @Description :
7
8 from .read_data import base64_to_bgr
9 from .read_data import bytes_to_bgr
...\ No newline at end of file ...\ No newline at end of file
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Create Date : 2022-06-16 10:59:50
5 # @Last Modified : 2022-08-03 14:59:15
6 # @Description :
7
8 import cv2
9 import base64
10 import numpy as np
11 # import tensorflow as tf  # only needed by the commented-out decode path below
12
13
14 def base64_to_bgr(img64):
15 """把 base64 转换成图片
16 单通道的灰度图或四通道的透明图都将自动转换成三通道的 BGR 图
17
18 Args:
19 img64 (TYPE): Description
20
21 Returns:
22 TYPE: image is a 3-D uint8 Tensor of shape [height, width, channels] where channels is BGR
23 """
24 encoded_image = base64.b64decode(img64)
25 img_array = np.frombuffer(encoded_image, np.uint8)
26 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
27 return image
28
29 def bytes_to_bgr(buffer: bytes):
30 """Read a byte stream as a OpenCV image
31
32 Args:
33 buffer (TYPE): bytes of a decoded image
34 """
35 img_array = np.frombuffer(buffer, np.uint8)
36 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
37
38 # image = tf.io.decode_image(buffer, channels=3)
39 # image = np.array(image)[...,::-1]
40 return image
...\ No newline at end of file ...\ No newline at end of file
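A round-trip sketch for `base64_to_bgr`, assuming the helper is re-exported at the package top level as `turnsole.base64_to_bgr`; the image path is a placeholder:

```python
import base64
import cv2
import turnsole

image = cv2.imread('sample.jpg')          # placeholder path
ok, buf = cv2.imencode('.jpg', image)     # encode to JPEG bytes
img64 = base64.b64encode(buf.tobytes())   # then to base64
restored = turnsole.base64_to_bgr(img64)  # back to a BGR ndarray
print(restored.shape)                     # (height, width, 3)
```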
1 # -*- coding: utf-8 -*-
2 # @Author : Lyu Kui
3 # @Email : 9428.al@gmail.com
4 # @Created Date : 2021-03-04 17:50:09
5 # @Last Modified : 2021-03-10 14:03:02
6 # @Description :
7
8 import os
9
10 image_types = (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff")
11
12
13 def list_images(basePath, contains=None):
14 # return the set of files that are valid
15 return list_files(basePath, validExts=image_types, contains=contains)
16
17 def list_files(basePath, validExts=None, contains=None):
18 # loop over the directory structure
19 for (rootDir, dirNames, filenames) in os.walk(basePath):
20 # loop over the filenames in the current directory
21 for filename in filenames:
22 # if the contains string is not none and the filename does not contain
23 # the supplied string, then ignore the file
24 if contains is not None and filename.find(contains) == -1:
25 continue
26
27 # determine the file extension of the current file
28 ext = filename[filename.rfind("."):].lower()
29
30 # check to see if the file is an image and should be processed
31 if validExts is None or ext.endswith(validExts):
32 # construct the path to the image and yield it
33 imagePath = os.path.join(rootDir, filename)
34 yield imagePath
35
36 def get_filename(filePath):
37 basename = os.path.basename(filePath)
38 fname, fextension = os.path.splitext(basename)
39 return fname
...\ No newline at end of file ...\ No newline at end of file
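A small usage sketch for the generators above; the `turnsole.paths` module path and the data directory are assumptions:

```python
from turnsole.paths import list_images  # module path assumed from the file layout

# Walk a dataset directory, keeping only image files whose name contains 'invoice'
for image_path in list_images('./data', contains='invoice'):
    print(image_path)
```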
1 import cv2
2 import fitz
3 import numpy as np
4
5 def pdf_to_images(pdf_path: str):
6 """PDF 转 OpenCV Image
7
8 Args:
9 pdf_path (str): Description
10
11 Returns:
12 TYPE: Description
13 """
14 images = []
15 doc = fitz.open(pdf_path)
16 # producer = doc.metadata.get('producer')
17
18 for pno in range(doc.page_count):
19 page = doc.load_page(pno)
20
21 all_texts = page.get_text().replace('\n', '').strip()
22 # Filter out a known watermark seen in practice (note: str.strip removes characters, not a substring, so use replace)
23 all_texts = all_texts.replace('Click to buy NOW!PDF-XChangewww.docu-track.com', '')
24 blocks = page.get_text("dict")["blocks"]
25 imgblocks = [b for b in blocks if b["type"] == 1]
26
27 page_images = []
28 # If the page contains no text at all but does contain image blocks,
29 if len(all_texts) == 0 and len(imgblocks) != 0:
30 # # These producers emit fragmented images; if they really are fragments, stitch them back together
31 # if producer in ['Microsoft: Print To PDF',
32 # 'GPL Ghostscript 8.71',
33 # 'doPDF Ver 7.3 Build 398 (Windows 7 Business Edition (SP 1) - Version: 6.1.7601 (x64))',
34 # '福昕阅读器PDF打印机 版本 11.0.114.4386']:
35 patches = []
36 for imgblock in imgblocks:
37 contents = imgblock["image"]
38 img_array = np.frombuffer(contents, dtype=np.uint8)
39 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
40 patches.append(image)
41 try:
42 try:
43 image = np.concatenate(patches, axis=0)
44 page_images.append(image)
45 except ValueError:  # vertical stitching failed on mismatched shapes; try horizontal
46 image = np.concatenate(patches, axis=1)
47 page_images.append(image)
48 except ValueError:
49 # When two patches cannot be stitched either way, we can treat them as two separate images; with more than two this assumption no longer holds
50 if len(patches) == 2:
51 page_images = patches
52 else:
53 pix = page.get_pixmap(dpi=350)
54 contents = pix.tobytes(output="png")
55 img_array = np.frombuffer(contents, dtype=np.uint8)
56 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
57 page_images.append(image)
58 # else:
59 # for imgblock in imgblocks:
60 # contents = imgblock["image"]
61 # img_array = np.frombuffer(contents, dtype=np.uint8)
62 # image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
63 # page_images.append(image)
64 else:
65 pix = page.get_pixmap(dpi=350)
66 contents = pix.tobytes(output="png")
67 img_array = np.frombuffer(contents, dtype=np.uint8)
68 image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
69 page_images.append(image)
70 images.append(page_images)
71 return images
72
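Because `pdf_to_images` groups its output per page, the result is a nested list. A minimal sketch that writes everything to disk; the PDF path is a placeholder and the top-level re-export is assumed:

```python
import cv2
import turnsole  # assuming pdf_to_images is re-exported at the package top level

pages = turnsole.pdf_to_images('document.pdf')  # placeholder path
for pno, page_images in enumerate(pages):
    # Each page yields one or more images (stitched fragments or a full-page render)
    for i, image in enumerate(page_images):
        cv2.imwrite(f'page{pno:03d}_img{i}.png', image)
```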
1 # import the necessary packages
2 from .count_frames import count_frames
3 from .fps import FPS
4 from .videostream import VideoStream
5 from .webcamvideostream import WebcamVideoStream
6 from .filevideostream import FileVideoStream
...\ No newline at end of file ...\ No newline at end of file
1 # import the necessary packages
2 # from ..convenience import is_cv3
3 import cv2
4
5 def count_frames(path, override=False):
6 # grab a pointer to the video file and initialize the total
7 # number of frames read
8 video = cv2.VideoCapture(path)
9 total = 0
10
11 # if the override flag is passed in, revert to the manual
12 # method of counting frames
13 if override:
14 total = count_frames_manual(video)
15
16 # otherwise, let's try the fast way first
17 else:
18 # lets try to determine the number of frames in a video
19 # via video properties; this method can be very buggy
20 # and might throw an error based on your OpenCV version
21 # or may fail entirely based on which video codecs
22 # you have installed
23 try:
24 # # check if we are using OpenCV 3
25 # if is_cv3():
26 # total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
27
28 # # otherwise, we are using OpenCV 2.4
29 # else:
30 # total = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
31
32 total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))  # cv2.cv was removed in OpenCV 3; use the modern constant
33
34 # uh-oh, we got an error -- revert to counting manually
35 except:
36 total = count_frames_manual(video)
37
38 # release the video file pointer
39 video.release()
40
41 # return the total number of frames in the video
42 return total
43
44 def count_frames_manual(video):
45 # initialize the total number of frames read
46 total = 0
47
48 # loop over the frames of the video
49 while True:
50 # grab the current frame
51 (grabbed, frame) = video.read()
52
53 # check to see if we have reached the end of the
54 # video
55 if not grabbed:
56 break
57
58 # increment the total number of frames read
59 total += 1
60
61 # return the total number of frames in the video file
62 return total
...\ No newline at end of file ...\ No newline at end of file
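A quick sketch of both counting paths; the `turnsole.video` package path and the clip path are assumptions:

```python
from turnsole.video import count_frames  # package path assumed

fast = count_frames('clip.mp4')                  # property-based count; may be wrong for some codecs
exact = count_frames('clip.mp4', override=True)  # decode every frame; slow but reliable
print(fast, exact)
```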
1 # import the necessary packages
2 from threading import Thread
3 import sys
4 import cv2
5 import time
6
7 # import the Queue class from Python 3
8 if sys.version_info >= (3, 0):
9 from queue import Queue
10
11 # otherwise, import the Queue class for Python 2.7
12 else:
13 from Queue import Queue
14
15
16 class FileVideoStream:
17 def __init__(self, path, transform=None, queue_size=128):
18 # initialize the file video stream along with the boolean
19 # used to indicate if the thread should be stopped or not
20 self.stream = cv2.VideoCapture(path)
21 self.stopped = False
22 self.transform = transform
23
24 # initialize the queue used to store frames read from
25 # the video file
26 self.Q = Queue(maxsize=queue_size)
27 # initialize the thread
28 self.thread = Thread(target=self.update, args=())
29 self.thread.daemon = True
30
31 def start(self):
32 # start a thread to read frames from the file video stream
33 self.thread.start()
34 return self
35
36 def update(self):
37 # keep looping infinitely
38 while True:
39 # if the thread indicator variable is set, stop the
40 # thread
41 if self.stopped:
42 break
43
44 # otherwise, ensure the queue has room in it
45 if not self.Q.full():
46 # read the next frame from the file
47 (grabbed, frame) = self.stream.read()
48
49 # if the `grabbed` boolean is `False`, then we have
50 # reached the end of the video file
51 if not grabbed:
52 self.stopped = True
53 break
54
55 # if there are transforms to be done, might as well
56 # do them on producer thread before handing back to
57 # consumer thread. ie. Usually the producer is so far
58 # ahead of consumer that we have time to spare.
59 #
60 # Python is not parallel but the transform operations
61 # are usually OpenCV native so release the GIL.
62 #
63 # Really just trying to avoid spinning up additional
64 # native threads and overheads of additional
65 # producer/consumer queues since this one was generally
66 # idle grabbing frames.
67 if self.transform:
68 frame = self.transform(frame)
69
70 # add the frame to the queue
71 self.Q.put(frame)
72 else:
73 time.sleep(0.1)  # Rest for 100ms, we have a full queue
74
75 self.stream.release()
76
77 def read(self):
78 # return next frame in the queue
79 return self.Q.get()
80
81 # Insufficient to have consumer use while(more()) which does
82 # not take into account if the producer has reached end of
83 # file stream.
84 def running(self):
85 return self.more() or not self.stopped
86
87 def more(self):
88 # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
89 tries = 0
90 while self.Q.qsize() == 0 and not self.stopped and tries < 5:
91 time.sleep(0.1)
92 tries += 1
93
94 return self.Q.qsize() > 0
95
96 def stop(self):
97 # indicate that the thread should be stopped
98 self.stopped = True
99 # wait until stream resources are released (producer thread might be still grabbing frame)
100 self.thread.join()
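The intended consumer loop for `FileVideoStream`, sketched under the same package-path assumption; the clip path is a placeholder:

```python
import cv2
from turnsole.video import FileVideoStream  # package path assumed

fvs = FileVideoStream('clip.mp4').start()  # placeholder path
while fvs.running():
    frame = fvs.read()  # blocks until the producer thread queues a frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # stand-in for real processing
fvs.stop()
```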
1 # import the necessary packages
2 import datetime
3
4 class FPS:
5 def __init__(self):
6 # store the start time, end time, and total number of frames
7 # that were examined between the start and end intervals
8 self._start = None
9 self._end = None
10 self._numFrames = 0
11
12 def start(self):
13 # start the timer
14 self._start = datetime.datetime.now()
15 return self
16
17 def stop(self):
18 # stop the timer
19 self._end = datetime.datetime.now()
20
21 def update(self):
22 # increment the total number of frames examined during the
23 # start and end intervals
24 self._numFrames += 1
25
26 def elapsed(self):
27 # return the total number of seconds between the start and
28 # end interval
29 return (self._end - self._start).total_seconds()
30
31 def fps(self):
32 # compute the (approximate) frames per second
33 return self._numFrames / self.elapsed()
...\ No newline at end of file ...\ No newline at end of file
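A minimal timing sketch for the `FPS` counter above, under the same package-path assumption:

```python
from turnsole.video import FPS  # package path assumed

fps = FPS().start()
for _ in range(100):
    # stand-in for grabbing and processing one frame
    fps.update()
fps.stop()
print('elapsed: %.2fs, approx. FPS: %.2f' % (fps.elapsed(), fps.fps()))
```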
1 # import the necessary packages
2 from picamera.array import PiRGBArray
3 from picamera import PiCamera
4 from threading import Thread
5 import cv2
6
7 class PiVideoStream:
8 def __init__(self, resolution=(320, 240), framerate=32, **kwargs):
9 # initialize the camera
10 self.camera = PiCamera()
11
12 # set camera parameters
13 self.camera.resolution = resolution
14 self.camera.framerate = framerate
15
16 # set optional camera parameters (refer to PiCamera docs)
17 for (arg, value) in kwargs.items():
18 setattr(self.camera, arg, value)
19
20 # initialize the stream
21 self.rawCapture = PiRGBArray(self.camera, size=resolution)
22 self.stream = self.camera.capture_continuous(self.rawCapture,
23 format="bgr", use_video_port=True)
24
25 # initialize the frame and the variable used to indicate
26 # if the thread should be stopped
27 self.frame = None
28 self.stopped = False
29
30 def start(self):
31 # start the thread to read frames from the video stream
32 t = Thread(target=self.update, args=())
33 t.daemon = True
34 t.start()
35 return self
36
37 def update(self):
38 # keep looping infinitely until the thread is stopped
39 for f in self.stream:
40 # grab the frame from the stream and clear the stream in
41 # preparation for the next frame
42 self.frame = f.array
43 self.rawCapture.truncate(0)
44
45 # if the thread indicator variable is set, stop the thread
46 # and release camera resources
47 if self.stopped:
48 self.stream.close()
49 self.rawCapture.close()
50 self.camera.close()
51 return
52
53 def read(self):
54 # return the frame most recently read
55 return self.frame
56
57 def stop(self):
58 # indicate that the thread should be stopped
59 self.stopped = True
1 # import the necessary packages
2 from .webcamvideostream import WebcamVideoStream
3
4 class VideoStream:
5 def __init__(self, src=0, usePiCamera=False, resolution=(320, 240),
6 framerate=32, **kwargs):
7 # check to see if the picamera module should be used
8 if usePiCamera:
9 # only import the picamera packages unless we are
10 # explicitly told to do so -- this helps remove the
11 # requirement of `picamera[array]` from desktops or
12 # laptops that still want to use the `imutils` package
13 from .pivideostream import PiVideoStream
14
15 # initialize the picamera stream and allow the camera
16 # sensor to warmup
17 self.stream = PiVideoStream(resolution=resolution,
18 framerate=framerate, **kwargs)
19
20 # otherwise, we are using OpenCV so initialize the webcam
21 # stream
22 else:
23 self.stream = WebcamVideoStream(src=src)
24
25 def start(self):
26 # start the threaded video stream
27 return self.stream.start()
28
29 def update(self):
30 # grab the next frame from the stream
31 self.stream.update()
32
33 def read(self):
34 # return the current frame
35 return self.stream.read()
36
37 def stop(self):
38 # stop the thread and release any resources
39 self.stream.stop()
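A typical display loop for the unified `VideoStream` wrapper, under the same package-path assumption; pass `usePiCamera=True` on a Raspberry Pi, otherwise the webcam branch below is used:

```python
import cv2
from turnsole.video import VideoStream  # package path assumed

vs = VideoStream(src=0).start()
while True:
    frame = vs.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
vs.stop()
cv2.destroyAllWindows()
```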
1 # import the necessary packages
2 from threading import Thread
3 import cv2
4
5 class WebcamVideoStream:
6 def __init__(self, src=0, name="WebcamVideoStream"):
7 # initialize the video camera stream and read the first frame
8 # from the stream
9 self.stream = cv2.VideoCapture(src)
10 (self.grabbed, self.frame) = self.stream.read()
11
12 # initialize the thread name
13 self.name = name
14
15 # initialize the variable used to indicate if the thread should
16 # be stopped
17 self.stopped = False
18
19 def start(self):
20 # start the thread to read frames from the video stream
21 t = Thread(target=self.update, name=self.name, args=())
22 t.daemon = True
23 t.start()
24 return self
25
26 def update(self):
27 # keep looping infinitely until the thread is stopped
28 while True:
29 # if the thread indicator variable is set, stop the thread
30 if self.stopped:
31 return
32
33 # otherwise, read the next frame from the stream
34 (self.grabbed, self.frame) = self.stream.read()
35
36 def read(self):
37 # return the frame most recently read
38 return self.frame
39
40 def stop(self):
41 # indicate that the thread should be stopped
42 self.stopped = True