import jieba

# Map alternate names/aliases to one canonical name so that mentions of the
# same character are counted together.
# NOTE(review): "采臣" mapping to "唐僧" looks inconsistent (采臣 is usually
# 宁采臣, and 唐僧 is from a different novel) — confirm this is intentional.
ALIASES = {
    "小倩": "聂小倩",
    "鬼妻": "聂小倩",
    "采臣": "唐僧",
    "黑山": "黑山老妖",
    "万妖群魔之首": "黑山老妖",
    "十四娘": "辛十四娘",
    "子楚": "孙子楚",
    "赵阿宝": "阿宝",
}

# Read the whole corpus; the context manager guarantees the file is closed.
with open("聊斋志异白话简写版.txt", "r", encoding="utf-8") as f:
    txt = f.read()

words = jieba.lcut(txt)  # precise-mode segmentation of the text

# Tally word -> occurrence count, folding aliases into their canonical name.
counts = {}
for word in words:
    # Single-character tokens are mostly particles/punctuation — skip them.
    if len(word) == 1:
        continue
    rword = ALIASES.get(word, word)
    counts[rword] = counts.get(rword, 0) + 1

# Sort by frequency, descending, and print the top 20 (or fewer, if the
# corpus yields fewer distinct words — the slice never raises IndexError).
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)
for word, count in items[:20]:
    print("{0:<10}{1:>5}".format(word, count))
相关文章
- 12-17NLP基础—jieba分词
- 12-17jieba 库的使用和好玩的词云
- 12-17jieba源碼研讀筆記(十四) - 詞性標注函數入口
- 12-17(八)通俗易懂理解——jieba中的HMM中文分词原理
- 12-17Note of Jieba
- 12-17jieba分词以及wordcloud词云
- 12-17Jieba库使用和好玩的词云
- 12-17Python分词工具——jieba
- 12-17北大开源分词工具包: 准确率远超THULAC、jieba 分词
- 12-17mac Pycharm 导入jieba报错解决