# direction_classifier.py
import cv2
import tensorflow as tf
from math import *
import numpy as np
import logging
class Direction_Classifier(object):
    """Classify an image's orientation (one of four 90-degree rotations)
    with a frozen TensorFlow graph, and rotate it back to upright.

    Predicted classes (inferred from the rotation applied below —
    TODO confirm against the model's training labels):
        0 -> already upright (no rotation)
        1 -> rotate by 90 degrees
        2 -> rotate by 180 degrees
        3 -> rotate by 270 degrees
    """

    def __init__(self, model_path):
        """Load a frozen GraphDef from ``model_path`` and open a session.

        Args:
            model_path: Path to a serialized TF1 frozen-graph ``.pb`` file
                containing tensors ``image_batch:0`` (input) and
                ``hands_gesture_inference/fc/BiasAdd:0`` (logits).
        """
        with tf.Graph().as_default():
            with tf.compat.v1.gfile.GFile(model_path, 'rb') as f:
                graph_def = tf.compat.v1.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.compat.v1.import_graph_def(graph_def, name='')
            # Handles to the frozen graph's input and logits tensors.
            self.x = tf.compat.v1.get_default_graph().get_tensor_by_name("image_batch:0")
            self.output = tf.compat.v1.get_default_graph().get_tensor_by_name("hands_gesture_inference/fc/BiasAdd:0")
            # Grow GPU memory on demand instead of grabbing it all up front.
            config = tf.compat.v1.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.compat.v1.Session(config=config)
        logging.info('******** Success Init Rotation Classifier ********')

    def close(self):
        """Release the TensorFlow session.

        Backward-compatible addition: the original never freed the session,
        leaking GPU/graph resources for the process lifetime.
        """
        if getattr(self, 'sess', None) is not None:
            self.sess.close()
            self.sess = None

    def reg(self, input_image):
        """Predict the image's orientation class and return it upright.

        Args:
            input_image: HxWx3 uint8 array; assumed RGB since it is
                converted to BGR before inference — TODO confirm callers.

        Returns:
            (rotated_image, class_pre): the image rotated to upright
            (unchanged when class 0) and the predicted class index.
        """
        height, width, _ = input_image.shape
        input_image_new = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR)
        # Model expects 112x112 inputs scaled by 1/256 (matches training;
        # NOTE(review): 256.0 rather than 255.0 — presumably intentional).
        image_resize = cv2.resize(input_image_new, (112, 112), interpolation=cv2.INTER_CUBIC)
        image_resize = image_resize / 256.0
        image_resize = np.expand_dims(image_resize, axis=0)  # add batch dim
        logit = self.sess.run(self.output, feed_dict={self.x: image_resize})
        class_pre = np.argmax(np.squeeze(logit))
        # Was print(); use logging for consistency with the rest of the file.
        logging.info('direction class is: %s', class_pre)
        # Map predicted class to the counter-rotation that rights the image.
        rotation_image = input_image
        if class_pre == 1:
            rotation_image = self.rotation_util(input_image, 90, height, width)
        elif class_pre == 2:
            rotation_image = self.rotation_util(input_image, 180, height, width)
        elif class_pre == 3:
            rotation_image = self.rotation_util(input_image, 270, height, width)
        return rotation_image, class_pre

    def rotation_util(self, img, degree, height, width):
        """Rotate ``img`` by ``degree`` counter-clockwise without cropping.

        The output canvas is enlarged to the rotated bounding box and the
        affine matrix is shifted so the image stays centered; uncovered
        border pixels are filled white.

        Args:
            img: Source image (HxWxC).
            degree: Rotation angle in degrees.
            height, width: Dimensions of ``img``.

        Returns:
            The rotated image on a (width_new x height_new) canvas.
        """
        # Bounding-box size of the rotated image.
        height_new = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
        width_new = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
        mat_rotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
        # Translate so the rotated content is centered in the new canvas.
        mat_rotation[0, 2] += (width_new - width) / 2
        mat_rotation[1, 2] += (height_new - height) / 2
        img_rotation = cv2.warpAffine(img, mat_rotation, (width_new, height_new), borderValue=(255, 255, 255))
        return img_rotation