test.py
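"""Pipeline driver: extract audio, frames, poses, and features from the input
videos, run each modality filter (fighting, emotion, audio, class, video, pose,
flow), and report classification accuracy against the label file. Paths and
file names are read from config.yaml via load_util.load_config."""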
import os
import cv2
import load_util
import media_util
import numpy as np
from sklearn.metrics import confusion_matrix
import fighting_filter, emotion_filter, argue_filter, audio_filter, class_filter
import video_filter, pose_filter, flow_filter


def accuracy_cal(config):
    """Compare the fused predictions in RESULT_FILE against LABEL_PATH and return accuracy."""
    label_file_path = config['VIDEO']['LABEL_PATH']
    frame_list_dir = config['VIDEO']['FRAME_LIST_DIR']
    final_file_name = config['AUDIO']['RESULT_FILE']
    final_file_path = os.path.join(frame_list_dir, final_file_name)
    with open(final_file_path) as f:
        final_file_lines = f.readlines()
    with open(label_file_path) as f:
        label_file_lines = f.readlines()
    # Map "<video_name>" -> "<comma-separated class scores>".
    final_pairs = {line.strip().split(' ')[0]: line.strip().split(' ')[1]
                   for line in final_file_lines}
    lines_num = len(label_file_lines) - 1  # the first label line is a header
    hit = 0
    for i, label_line in enumerate(label_file_lines):
        if i == 0:
            continue  # skip the header line
        file, label = label_line.strip().split(' ')
        final_pre = final_pairs[file]
        # Scores are strings; convert to float before argmax (classes are 1-indexed).
        final_pre_class = np.argmax(np.array(final_pre.split(','), dtype=float)) + 1
        print(final_pre_class, label)
        if final_pre_class == int(label):
            hit += 1
    return hit / lines_num
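
# Assumed file formats (inferred from the parsing in accuracy_cal, not from a spec):
#   RESULT_FILE: "<video_name> <score_1,...,score_K>" per line,
#       one comma-separated score per class.
#   LABEL_PATH:  a header line, then "<video_name> <label>" per line,
#       with labels 1-indexed to match argmax(...) + 1.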


def main():
    config_path = r'config.yaml'
    config = load_util.load_config(config_path)
    # Pre-processing: extract audio, frames, poses, and per-frame features.
    media_util.extract_wav(config)
    media_util.extract_frame(config)
    media_util.extract_frame_pose(config)
    media_util.extract_is10(config)
    media_util.extract_random_face_feature(config)
    media_util.extract_mirror(config)
    # Run each modality filter over the extracted data.
    fighting_filter.start_filter(config)
    emotion_filter.start_filter(config)
    audio_filter.start_filter(config)
    class_filter.start_filter(config)
    video_filter.start_filter(config)
    pose_filter.start_filter(config)
    flow_filter.start_filter(config)
    acc = accuracy_cal(config)
    print(acc)


if __name__ == '__main__':
    main()