# -*- coding: utf-8 -*-
import jieba
from tokenizer import cut_hanlp
# jieba.load_userdict("dict.txt")
# # Raise the frequency of a single word so jieba keeps it unsplit:
# jieba.suggest_freq('台中', tune=True)
# Raise the frequency of every entry in dict.txt:
# fp = open("dict.txt", 'r', encoding='utf8')
# for line in fp:
#     line = line.strip()
#     jieba.suggest_freq(line, tune=True)
# # Same as above in one line (list comprehension used for its side effects):
# [jieba.suggest_freq(line.strip(), tune=True) for line in open("dict.txt", 'r', encoding='utf8')]
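# # The one-liner above also leaks the file handle; a context manager is the
# # more idiomatic equivalent (same effect, kept commented out like the
# # alternatives above):
# with open("dict.txt", 'r', encoding='utf8') as fp:
#     for line in fp:
#         jieba.suggest_freq(line.strip(), tune=True)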
if __name__ == "__main__":
    # "台中" (Taichung) should not be split apart.
    string = "台中正确应该不会被切开。"
    jieba.load_userdict("dict.txt")
    # jieba.suggest_freq('台中', tune=True)
    words_jieba = " ".join(jieba.cut(string, HMM=False))
    words_hanlp = cut_hanlp(string)
    print("words_jieba:" + words_jieba, '\n', "words_hanlp:" + words_hanlp)
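# Expected behavior: with "台中" in dict.txt (or suggest_freq applied), jieba
# emits "台中" as one token instead of "台" + "中"; HanLP segments with its own
# dictionary and may differ.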
# --- tokenizer.py (provides cut_hanlp, imported by the script above) ---
# -*- coding: utf-8 -*-
import os
from jpype import startJVM, getDefaultJVMPath, JClass, shutdownJVM
# root_path="/home/lhq/桌面/NLP_basis/hanlp"
# djclass_path="-Djava.class.path="+root_path+os.sep+"hanlp-1.6.2.jar:"+root_path
startJVM(getDefaultJVMPath(),
         "-Djava.class.path=/home/lhq/桌面/NLP_basis/hanlp/hanlp-1.7.3.jar:/home/lhq/桌面/NLP_basis/hanlp",
         "-Xms1g",
         "-Xmx1g")
Tokenizer = JClass('com.hankcs.hanlp.tokenizer.StandardTokenizer')
def to_string(sentence, return_generator=False):
    """Segment `sentence` with HanLP's StandardTokenizer.

    Returns a generator of [word, pos] pairs when return_generator is True,
    otherwise a space-joined string of the words only.
    """
    if return_generator:
        return (word_pos_item.toString().split('/') for word_pos_item in Tokenizer.segment(sentence))
    else:
        # Each segment item prints as "word/pos"; .split('/') turns that string
        # into a list, e.g. 'ssfa/fsss'.split('/') => ['ssfa', 'fsss'].
        return " ".join([word_pos_item.toString().split('/')[0] for word_pos_item in Tokenizer.segment(sentence)])
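# Example (illustrative output only; the exact POS tags depend on the HanLP
# version and model data):
#   to_string("商品和服务")                               -> "商品 和 服务"
#   list(to_string("商品和服务", return_generator=True)) -> [['商品', 'n'], ['和', 'cc'], ['服务', 'vn']]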
# POS tags to filter out in seg_sentences. The original file never defined
# this set, so the tags below (punctuation, particles, modal words,
# onomatopoeia) are placeholders to make the code runnable; adjust as needed.
drop_pos_set = {'w', 'u', 'y', 'o'}

def seg_sentences(sentence, with_filter=True, return_generator=False):
    # Always request [word, pos] pairs here: the space-joined string form of
    # to_string() cannot be filtered by POS tag.
    segs = to_string(sentence, return_generator=True)
    if with_filter:
        g = [word_pos_pair[0] for word_pos_pair in segs if len(word_pos_pair) == 2 and word_pos_pair[0] != ' ' and word_pos_pair[1] not in drop_pos_set]
    else:
        g = [word_pos_pair[0] for word_pos_pair in segs if len(word_pos_pair) == 2 and word_pos_pair[0] != ' ']
    return iter(g) if return_generator else g
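# Example (assumes the JVM above started successfully; output is illustrative):
#   seg_sentences("今天天气很好。") -> ['今天', '天气', '很', '好']
#   (the trailing "。" is dropped because 'w', punctuation, is in drop_pos_set)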
def cut_hanlp(raw_sentence, return_list=True):
    """Segment with HanLP. Returns a space-joined string when return_list is
    True (the form the demo script above expects), otherwise an iterator of
    words."""
    if len(raw_sentence.strip()) > 0:
        if return_list:
            return to_string(raw_sentence)
        # Iterate word by word rather than character by character (iterating
        # the joined string, as the original did, yields single characters).
        return iter(to_string(raw_sentence).split(" "))
    return ""
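if __name__ == "__main__":
    # Minimal self-test sketch; assumes the HanLP jar and data paths passed to
    # startJVM above exist on this machine. The sentence is illustrative.
    print(cut_hanlp("商品和服务"))
    print(seg_sentences("商品和服务"))
    shutdownJVM()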