# word2vec.py (1.75 KB) — web-viewer line-number gutter removed during extraction cleanup
import re
import numpy as np
from gensim.models import word2vec
# NOTE(review): hard-coded absolute path to one developer's machine — this will fail
# anywhere else. TODO: load the path from config/env instead. Loading at import time
# also means importing this module performs disk I/O as a side effect.
word2vec_model = word2vec.Word2Vec.load('/Users/zhouweiqi/Downloads/xgboost/models/word2vec_train_single.model')
def simple_word2vec(text):
    """Build a 5-dim hand-crafted feature vector describing *text*'s composition.

    The vector is:
        [len/100,
         fraction of CJK (\\u4e00-\\u9fa5) chars,
         fraction of ASCII letters,
         fraction of digit chars,
         fraction of "other" chars]
    Fractions are computed over the stripped text. Whitespace inside the
    stripped text is counted separately and deliberately excluded from the
    vector (the original design commented it out).

    Args:
        text: input string; leading/trailing whitespace is ignored.

    Returns:
        list[float] of length 5. For empty (or whitespace-only) input,
        returns all zeros instead of raising ZeroDivisionError.
    """
    clean_text = text.strip()
    text_len = len(clean_text)
    # Guard: the original crashed with ZeroDivisionError on empty input.
    if text_len == 0:
        return [0.0, 0.0, 0.0, 0.0, 0.0]

    digit_num = 0
    en_num = 0
    cn_num = 0
    space_num = 0
    other_num = 0
    for char in clean_text:
        # Order matters: isdigit() first, then ASCII letters, then internal
        # whitespace, then CJK range, everything else is "other".
        if char.isdigit():
            digit_num += 1
        elif re.match(r'[A-Za-z]', char):
            en_num += 1
        elif char.isspace():
            space_num += 1
        elif re.match(r'[\u4e00-\u9fa5]', char):
            cn_num += 1
        else:
            other_num += 1

    return [
        text_len / 100,
        cn_num / text_len,
        en_num / text_len,
        digit_num / text_len,
        # space_num / text_len,  # intentionally excluded from the feature set
        other_num / text_len,
    ]
def jwq_word2vec(text, text_vec_max_lens=1500):
    """Encode *text* as a flat, fixed-length float64 vector of char embeddings.

    Each character of the stripped text is looked up in the module-level
    ``word2vec_model``; out-of-vocabulary characters fall back to the 'unk'
    embedding. Per-char vectors are concatenated, then truncated or
    zero-padded to exactly ``text_vec_max_lens`` floats.

    Args:
        text: input string; leading/trailing whitespace is ignored.
        text_vec_max_lens: fixed output length (floats, not characters).

    Returns:
        numpy.ndarray of dtype float64 and shape (text_vec_max_lens,).

    Raises:
        KeyError: if 'unk' itself is missing from the model vocabulary.
    """
    clean_text = text.strip()
    sentence_vec = []
    for char in clean_text:
        try:
            word_vec = word2vec_model.wv[char]
        except KeyError:
            # Was a bare `except:` — narrow it so only missing-vocab lookups
            # trigger the 'unk' fallback (bare except also swallowed
            # KeyboardInterrupt/SystemExit and masked real bugs).
            word_vec = word2vec_model.wv['unk']
        sentence_vec.extend(word_vec)

    if len(sentence_vec) > text_vec_max_lens:
        sentence_vec = sentence_vec[:text_vec_max_lens]
    else:
        # Zero-pad in one shot instead of a per-element append loop.
        sentence_vec.extend([0.0] * (text_vec_max_lens - len(sentence_vec)))

    # np.float64(list) is the deprecated scalar-constructor-on-sequence form
    # (removed in modern NumPy); np.asarray is the supported equivalent.
    return np.asarray(sentence_vec, dtype=np.float64)