# word2vec.py
import re
import numpy as np
from gensim.models import word2vec
# Pre-trained character-level word2vec model, loaded once at import time.
# NOTE(review): hard-coded absolute path — this module fails to import on any
# machine without this exact file; consider making the path configurable.
word2vec_model = word2vec.Word2Vec.load('/Users/zhouweiqi/Downloads/xgboost/models/word2vec_train_single.model')


def simple_word2vec(text):
    """Build a 5-dim hand-crafted character-class feature vector for *text*.

    Features, in order:
        0. length of the stripped text, scaled by 1/100
        1. fraction of CJK (Chinese, U+4E00-U+9FA5) characters
        2. fraction of ASCII letters
        3. fraction of digit characters
        4. fraction of "other" characters (punctuation, symbols, ...)

    Whitespace is counted separately but intentionally excluded from the
    output (the space ratio feature was disabled in the original version).

    Args:
        text: input string; surrounding whitespace is stripped first.

    Returns:
        list[float] of length 5. All zeros when the stripped text is empty
        (previously this raised ZeroDivisionError on blank input).
    """
    clean_text = text.strip()
    text_len = len(clean_text)
    if text_len == 0:
        # Guard: the ratios below divide by text_len.
        return [0.0, 0.0, 0.0, 0.0, 0.0]

    digit_num = 0
    en_num = 0
    cn_num = 0
    space_num = 0
    other_num = 0
    for char in clean_text:
        if char.isdigit():
            digit_num += 1
        elif re.match(r'[A-Za-z]', char):
            en_num += 1
        elif char.isspace():
            space_num += 1
        elif re.match(r'[\u4e00-\u9fa5]', char):
            cn_num += 1
        else:
            other_num += 1

    return [text_len / 100,
            cn_num / text_len,
            en_num / text_len,
            digit_num / text_len,
            # space_num / text_len is deliberately excluded
            other_num / text_len]

def jwq_word2vec(text, text_vec_max_lens=1500):
    """Encode *text* as a fixed-length flat vector of per-character embeddings.

    Each character of the stripped text is looked up in the module-level
    ``word2vec_model``; out-of-vocabulary characters fall back to the model's
    ``'unk'`` embedding. The concatenated embedding values are truncated or
    zero-padded to exactly ``text_vec_max_lens`` entries.

    Args:
        text: input string; surrounding whitespace is stripped first.
        text_vec_max_lens: fixed output length (default 1500).

    Returns:
        numpy.ndarray of dtype float64 with shape (text_vec_max_lens,).
    """
    clean_text = text.strip()

    sentence_vec = []
    for char in clean_text:
        try:
            word_vec = word2vec_model.wv[char]
        except KeyError:
            # Out-of-vocabulary character: use the trained 'unk' embedding.
            # Catch only KeyError -- the original bare `except:` also hid
            # unrelated failures (e.g. a broken model object).
            word_vec = word2vec_model.wv['unk']
        sentence_vec.extend(word_vec)

    if len(sentence_vec) > text_vec_max_lens:
        # Truncate over-long texts to the fixed feature width.
        sentence_vec = sentence_vec[:text_vec_max_lens]
    else:
        # Zero-pad short texts up to the fixed feature width.
        sentence_vec.extend([0.] * (text_vec_max_lens - len(sentence_vec)))

    # np.float64(<list>) relied on a deprecated scalar-type sequence
    # conversion; build the float64 array explicitly instead.
    return np.asarray(sentence_vec, dtype=np.float64)