Notes
- Final model files:
  Link: https://pan.baidu.com/s/1acGhejPCw98Mx4iKozVZdw  Extraction code: vsm1
- Source code on GitHub: https://github.com/datadevsh/wiki-gensim-word2vector
- If you run into encoding problems, see "Variable-type and encoding issues when parsing * files into Chinese":
  https://my.oschina.net/datadev/blog/1836529
- If you use PyCharm, it may run out of memory. Increase the -Xmx parameter in both pycharm64.exe.vmoptions files.
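For reference, a minimal sketch of the relevant lines in pycharm64.exe.vmoptions; the values below are illustrative assumptions and should be sized to your machine's RAM:

# pycharm64.exe.vmoptions (excerpt; values are illustrative)
-Xms512m
-Xmx4096m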
Execution time
1. Parse the XML: 13 minutes
2. Traditional-to-Simplified conversion: 1 minute
3. jieba word segmentation: 27 minutes
4. Model training: 22 minutes
Total: 63 minutes.
1. Download the dump
Download the pages-articles.xml file: open the link below, pick the most recent date, and on that page search for "pages-articles.xml".
Download URL: https://dumps.wikimedia.org/zhwiki/
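For example, the dump can be fetched directly with wget; the 20180620 date directory is an assumption based on the file name used later in this post, so substitute whichever dump date you picked:

wget https://dumps.wikimedia.org/zhwiki/20180620/zhwiki-20180620-pages-articles.xml.bz2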
2. Parse the XML
# -*- coding: utf-8 -*-
# Parse the Wikipedia XML dump into one plain-text article per line
import logging
import os.path
import sys
import time

from gensim.corpora import WikiCorpus

begin = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    inp, outp = sys.argv[1:3]
    space = ' '
    i = 0
    output = open(outp, 'w', encoding='utf-8')
    # lemmatize=False skips lemmatization (pointless for Chinese);
    # dictionary={} skips building a dictionary we don't need here
    wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
    for text in wiki.get_texts():
        s = space.join(text) + "\n"
        output.write(s)
        i = i + 1
        if i % 10000 == 0:
            logger.info("Saved " + str(i) + " articles")
    output.close()
    logger.info("Finished: saved " + str(i) + " articles")

    end = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("begin", begin)
    print("end  ", end)

# python 1process-xml.py zhwiki-20180620-pages-articles.xml.1.49G.bz2 wiki.zh.1.49G.text
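A quick, optional spot check (a sketch, not part of the original script): print the start of the first line of the output file to confirm that the wiki markup was stripped and each article is one line of space-separated text. The file name matches the command above.

with open('wiki.zh.1.49G.text', encoding='utf-8') as f:
    print(f.readline()[:200])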
3. Convert Traditional to Simplified Chinese
Use OpenCC. Download opencc-1.0.1-win64.7z from:
https://bintray.com/package/files/byvoid/opencc/OpenCC
.\opencc -i wiki_text.txt -o test.txt -c t2s.json
-i  input file
-o  output file
This step runs in about 1 minute.
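If you prefer to test the conversion from Python, a small sketch using the opencc-python-reimplemented package (an assumption; the post itself uses the CLI above):

from opencc import OpenCC  # pip install opencc-python-reimplemented (hypothetical choice of binding)
cc = OpenCC('t2s')  # Traditional-to-Simplified, same as the t2s.json config
print(cc.convert('漢語維基百科'))  # expected: 汉语维基百科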
4. jieba word segmentation
# -*- coding: utf-8 -*-
# Segment the simplified-Chinese corpus with jieba, one article per line
import codecs
import time

import jieba

begin = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

f = codecs.open('D:/soft/opencc-1.0.1-win64/wiki-ts.txt', 'r', encoding='utf8')
target = codecs.open('D:/soft/opencc-1.0.1-win64/wiki.jieba.txt', 'w', encoding='utf8')
print("open file")

line_num = 1
line = f.readline()
while line:
    if line_num % 10000 == 0:
        print('---------------processing', line_num, 'articles------------')
    # join the tokens back into one space-separated line
    line_seg = " ".join(jieba.cut(line))
    target.writelines(line_seg)
    line_num = line_num + 1
    line = f.readline()
f.close()
target.close()

end = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("begin", begin)
print("end  ", end)
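A minimal illustration of what this step produces (the sample sentence is mine, and the exact token boundaries depend on your jieba version and dictionary):

import jieba
print(" ".join(jieba.cut("数学是研究数量、结构以及空间等概念的一门学科")))
# e.g.: 数学 是 研究 数量 、 结构 以及 空间 等 概念 的 一门 学科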
5. Train the model
# -*- coding: utf-8 -*-
# @Describe: train a word2vec model on the segmented corpus
# @File : word2vec-model.py
import logging
import multiprocessing
import os.path
import sys
import time

from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

begin = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    inp = "D:/soft/opencc-1.0.1-win64/wiki.jieba.txt"
    outp1 = 'D:/soft/opencc-1.0.1-win64/wiki.model'
    outp2 = 'D:/soft/opencc-1.0.1-win64/wiki.vector'

    # 400-dimensional vectors, context window of 5, drop words seen fewer
    # than 5 times, one worker per CPU core (gensim pre-4.0 API: `size`
    # was renamed `vector_size` in gensim 4.0)
    model = Word2Vec(LineSentence(inp), size=400, window=5, min_count=5,
                     workers=multiprocessing.cpu_count())
    model.save(outp1)                                   # full model, training can resume
    model.wv.save_word2vec_format(outp2, binary=False)  # plain-text vectors

    end = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("begin", begin)
    print("end  ", end)
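The plain-text wiki.vector file can later be reloaded without the full model; a small sketch using gensim's KeyedVectors (same pre-4.0 API as above):

from gensim.models import KeyedVectors
wv = KeyedVectors.load_word2vec_format('D:/soft/opencc-1.0.1-win64/wiki.vector', binary=False)
print(wv.most_similar('数学'))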
6. Test the model
# -*- coding: utf-8 -*-
# @Describe: sanity-check the trained model
# @File : test-model.py
from gensim.models import Word2Vec
import time

begin = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
model = Word2Vec.load('D:/soft/opencc-1.0.1-win64/wiki.model')

# second-level product categories
testwords = ['日用百货', '收纳整理', '家纺', '家庭清洁', '绿植园艺', '厨房用品']
word = '被子'
for i in testwords:
    # n_similarity expects two word lists; a bare string is iterated
    # character by character, so this compares averaged character vectors
    sim = model.n_similarity(word, i)
    print(i, sim)

testwords = ['苹果', '数学', '学术', '白痴', '篮球']
for i in range(5):
    res = model.most_similar(testwords[i])
    print(testwords[i])
    print(res)
print(model.most_similar(word))

end = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("begin", begin)
print("end  ", end)
# 收纳整理 0.16833255
# 家纺 0.14426242
# 家庭清洁 0.066685855
# 绿植园艺 0.028275765
# 厨房用品 0.2936325
# 苹果
# [('apple', 0.5410169363021851), ('苹果公司', 0.4918888807296753), ('咬一口', 0.4741284251213074), ('洋葱', 0.4696866571903229), ('冰淇淋', 0.4614587426185608), ('苹果电脑', 0.45998817682266235), ('黑莓', 0.4557930827140808), ('水果', 0.4546721577644348), ('iphone', 0.44593721628189087), ('草莓', 0.4437388479709625)]
# 数学
# [('微积分', 0.7083343267440796), ('算术', 0.6934097409248352), ('数学分析', 0.663016140460968), ('概率论', 0.6389687061309814), ('数论', 0.6296793222427368), ('逻辑学', 0.6191371083259583), ('几何学', 0.60764479637146), ('数理逻辑', 0.5989662408828735), ('物理', 0.5965093970298767), ('高等数学', 0.5895018577575684)]
# 学术
# [('学术研究', 0.7319201231002808), ('汉学', 0.5988526344299316), ('学术活动', 0.5887891054153442), ('科学研究', 0.5864561796188354), ('学术界', 0.5863242149353027), ('教学研究', 0.5767545700073242), ('教研', 0.5732147097587585), ('学术交流', 0.561274528503418), ('科研', 0.5595779418945312), ('医学教育', 0.5571168661117554)]
# 白痴
# [('疯子', 0.5986206531524658), ('书呆子', 0.5612877607345581), ('骗子', 0.538498044013977), ('怪胎', 0.5305827856063843), ('爱哭鬼', 0.5293511152267456), ('*', 0.5216787457466125), ('自恋', 0.5185167789459229), ('变态', 0.5165976285934448), ('自以为是', 0.516464114189148), ('蠢', 0.5106762051582336)]
# 篮球
# [('美式足球', 0.633753776550293), ('橄榄球', 0.6222437620162964), ('排球', 0.5964736938476562), ('棒球', 0.5949814319610596), ('男子篮球', 0.5927262306213379), ('冰球', 0.591292142868042), ('篮球员', 0.5610231161117554), ('篮球运动', 0.5576823353767395), ('足球', 0.5409365892410278), ('橄榄球队', 0.5348620414733887)]
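One further check you can try against the loaded model (not part of the original post; results depend entirely on your training run):

# classic word-analogy query: 国王 - 男人 + 女人 ≈ ?
print(model.most_similar(positive=['国王', '女人'], negative=['男人'], topn=5))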