mirror of https://github.com/fxsjy/jieba.git
synced 2025-07-10 00:01:33 +08:00

merge from master

This commit is contained in:
parent 2857ae45cc
commit dce353f88b

README.md (86 lines changed)
README.md
@@ -29,19 +29,31 @@ http://jiebademo.ap01.aws.af.cm/
 
 (Powered by Appfog)
 
+Python Version
+==============
+* The master branch currently supports Python 2.x only
+* The Python 3.x branch is also largely usable: https://github.com/fxsjy/jieba/tree/jieba3k
+
 Demo site source code: https://github.com/fxsjy/jiebademo
 
 Usage
 ========
 Installation under Python 2.x
 ===================
 * Fully automatic installation: `easy_install jieba` or `pip install jieba`
 * Semi-automatic installation: download http://pypi.python.org/pypi/jieba/ , unzip, then run `python setup.py install`
 * Manual installation: place the `jieba` directory in the current directory or in `site-packages`
 * Load it with `import jieba` (the first import builds the Trie, which takes a few seconds)
 
 Installation under Python 3.x
 ====================
 * The master branch currently supports Python 2.x only
 * The Python 3.x branch is also largely usable: https://github.com/fxsjy/jieba/tree/jieba3k
 
     git clone https://github.com/fxsjy/jieba.git
     git checkout jieba3k
     python setup.py install
 
+Jieba word segmentation: Java version
+================
+Author: piaolingxue
+Repository: https://github.com/huaban/jieba-analysis
+
 Algorithm
 ========
 * Efficient word-graph scanning based on a Trie structure, producing a directed acyclic graph (DAG) of all possible word segmentations of the Chinese characters in a sentence
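To make the word-graph scan above concrete, here is a minimal sketch of the same idea (illustrative only, not jieba's implementation: a plain dict of words stands in for jieba's Trie, and `FREQ`/`get_dag` are made-up names):

```python
# Toy dictionary; jieba builds a Trie from dict.txt instead.
FREQ = {u"北京": 1, u"清华": 1, u"大学": 1, u"清华大学": 1}

def get_dag(sentence):
    """Map each start index i to every end index j such that
    sentence[i:j+1] is a dictionary word; i itself is the
    single-character fallback."""
    dag = {}
    for i in range(len(sentence)):
        ends = [i]
        for j in range(i + 1, len(sentence)):
            if sentence[i:j + 1] in FREQ:
                ends.append(j)
        dag[i] = ends
    return dag

print(get_dag(u"清华大学"))  # {0: [0, 1, 3], 1: [1], 2: [2, 3], 3: [3]}
```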
@@ -120,9 +132,9 @@ Output:
 * Usage example
 
     >>> import jieba.posseg as pseg
-    >>> words =pseg.cut("我爱北京天安门")
+    >>> words = pseg.cut("我爱北京天安门")
     >>> for w in words:
-    ...    print(w.word,w.flag)
+    ...    print w.word, w.flag
     ...
     我 r
     爱 v
@@ -142,6 +154,50 @@ Output:
 
 * Experimental result: on a 4-core 3.4GHz Linux machine, exact-mode segmentation of the complete works of Jin Yong runs at 1 MB/s, 3.3 times the speed of the single-process version.
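For reference, parallel mode is switched on with `enable_parallel` (visible in the code changes further down; the worker count argument is optional). A minimal sketch:

```python
import jieba

# POSIX only; raises an Exception on Windows (os.name == 'nt')
jieba.enable_parallel(4)  # split the input across 4 worker processes

print("/ ".join(jieba.cut(u"我来到北京清华大学")))
```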
+
+Feature 6): Tokenize: return words' start and end positions in the original text
+============================================
+* Note: the input only accepts unicode
+* Default mode
+
+```python
+result = jieba.tokenize('永和服装饰品有限公司')
+for tk in result:
+    print("word %s\t\t start: %d \t\t end:%d" % (tk[0], tk[1], tk[2]))
+```
+
+```
+word 永和        start: 0    end:2
+word 服装        start: 2    end:4
+word 饰品        start: 4    end:6
+word 有限公司    start: 6    end:10
+```
+
+* Search mode
+
+```python
+result = jieba.tokenize('永和服装饰品有限公司', mode='search')
+for tk in result:
+    print("word %s\t\t start: %d \t\t end:%d" % (tk[0], tk[1], tk[2]))
+```
+
+```
+word 永和        start: 0    end:2
+word 服装        start: 2    end:4
+word 饰品        start: 4    end:6
+word 有限        start: 6    end:8
+word 公司        start: 8    end:10
+word 有限公司    start: 6    end:10
+```
+
+Feature 7): ChineseAnalyzer for the Whoosh search engine
+============================================
+* Import: `from jieba.analyse import ChineseAnalyzer`
+* Usage example: https://github.com/fxsjy/jieba/blob/master/test/test_whoosh.py
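A typical Whoosh setup with this analyzer might look like the sketch below (assumes the `whoosh` package is installed; the index directory and field name are illustrative):

```python
from whoosh.fields import Schema, TEXT
from whoosh.index import create_in
from jieba.analyse import ChineseAnalyzer

analyzer = ChineseAnalyzer()            # jieba-backed tokenizer for Whoosh
schema = Schema(content=TEXT(stored=True, analyzer=analyzer))
ix = create_in("indexdir", schema)      # "indexdir" must already exist
writer = ix.writer()
writer.add_document(content=u"我来到北京清华大学")
writer.commit()
```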
 
 Other dictionaries
 ========
 1. A dictionary file with a smaller memory footprint
@@ -189,7 +245,7 @@ jieba uses lazy loading: "import jieba" does not immediately trigger dictionary loading; once…
 
 Change Log
 ==========
 http://www.oschina.net/p/jieba/news#list
 https://github.com/fxsjy/jieba/blob/master/Changelog
 
 jieba
 ========
@@ -227,16 +283,16 @@ Code example: segmentation
 
     #encoding=utf-8
     import jieba
 
-    seg_list = jieba.cut("我来到北京清华大学",cut_all=True)
-    print("Full Mode:", "/ ".join(seg_list)) #全模式
+    seg_list = jieba.cut("我来到北京清华大学", cut_all=True)
+    print("Full Mode:", "/ ".join(seg_list)) # full mode
 
-    seg_list = jieba.cut("我来到北京清华大学",cut_all=False)
-    print("Default Mode:", "/ ".join(seg_list)) #默认模式
+    seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
+    print("Default Mode:", "/ ".join(seg_list)) # default mode
 
     seg_list = jieba.cut("他来到了网易杭研大厦")
     print(", ".join(seg_list))
 
-    seg_list = jieba.cut_for_search("小明硕士毕业于中国科学院计算所,后在日本京都大学深造") #搜索引擎模式
+    seg_list = jieba.cut_for_search("小明硕士毕业于中国科学院计算所,后在日本京都大学深造") # search engine mode
     print(", ".join(seg_list))
 
 Output:
@@ -296,7 +352,7 @@ Initialization
 
 By default, Jieba employs lazy loading to build the trie only once it is necessary. This takes 1-3 seconds the first time, after which it is not initialized again. If you want to initialize Jieba manually, you can call:
 
     import jieba
-    jieba.initialize() #(optional)
+    jieba.initialize() # (optional)
 
 You can also specify the dictionary (not supported before version 0.28):
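For example (a sketch; `data/dict.txt.big` is an illustrative path, and any file in the default dictionary's format works):

```python
import jieba

jieba.set_dictionary('data/dict.txt.big')  # must happen before first use
jieba.initialize()                         # optional: load it right away
```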
jieba/__init__.py
@@ -1,13 +1,9 @@
-from __future__ import with_statement
 __version__ = '0.31'
 __license__ = 'MIT'
 
 import re
-
 import math
 import os
 import sys
-import pprint
 from . import finalseg
-import time
@@ -42,7 +38,7 @@ def gen_trie(f_name):
             ltotal+=freq
             p = trie
             for c in word:
-                if not c in p:
+                if c not in p:
                     p[c] ={}
                 p = p[c]
             p['']='' #ending flag
@@ -153,7 +149,7 @@ def get_DAG(sentence):
             if c in p:
                 p = p[c]
                 if '' in p:
-                    if not i in DAG:
+                    if i not in DAG:
                         DAG[i]=[]
                     DAG[i].append(j)
                     j+=1
@@ -166,7 +162,7 @@ def get_DAG(sentence):
         i+=1
         j=i
     for i in range(len(sentence)):
-        if not i in DAG:
+        if i not in DAG:
             DAG[i] =[i]
     return DAG
 
@@ -189,7 +185,7 @@ def __cut_DAG(sentence):
                     yield buf
                     buf=''
                 else:
-                    if not (buf in FREQ):
+                    if (buf not in FREQ):
                         regognized = finalseg.cut(buf)
                         for t in regognized:
                             yield t
@@ -204,7 +200,7 @@ def __cut_DAG(sentence):
         if len(buf)==1:
             yield buf
         else:
-            if not (buf in FREQ):
+            if (buf not in FREQ):
                 regognized = finalseg.cut(buf)
                 for t in regognized:
                     yield t
@@ -213,7 +209,7 @@ def __cut_DAG(sentence):
             yield elem
 
 def cut(sentence,cut_all=False):
-    if( type(sentence) is bytes):
+    if isinstance(sentence, bytes):
         try:
             sentence = sentence.decode('utf-8')
         except UnicodeDecodeError:
@@ -230,8 +226,9 @@ def cut(sentence,cut_all=False):
     if cut_all:
         cut_block = __cut_all
     for blk in blocks:
+        if len(blk)==0:
+            continue
         if re_han.match(blk):
-            #pprint.pprint(__cut_DAG(blk))
             for word in cut_block(blk):
                 yield word
         else:
@@ -287,7 +284,7 @@ def add_word(word, freq, tag=None):
         user_word_tag_tab[word] = tag.strip()
     p = trie
     for c in word:
-        if not c in p:
+        if c not in p:
             p[c] = {}
         p = p[c]
     p[''] = '' # ending flag
@@ -307,7 +304,7 @@ def __lcut_for_search(sentence):
 def enable_parallel(processnum=None):
     global pool,cut,cut_for_search
     if os.name=='nt':
-        raise Exception("parallel mode only supports posix system")
+        raise Exception("jieba: parallel mode only supports posix system")
     if sys.version_info[0]==2 and sys.version_info[1]<6:
         raise Exception("jieba: the parallel feature needs Python version>2.5 ")
     from multiprocessing import Pool,cpu_count
@@ -348,7 +345,7 @@ def set_dictionary(dictionary_path):
     with DICT_LOCK:
         abs_path = os.path.normpath( os.path.join( os.getcwd(), dictionary_path ) )
         if not os.path.exists(abs_path):
-            raise Exception("path does not exists:" + abs_path)
+            raise Exception("jieba: path does not exists:" + abs_path)
         DICTIONARY = abs_path
         initialized = False
 
@@ -360,7 +357,7 @@ def get_abs_path_dict():
 def tokenize(unicode_sentence,mode="default"):
     #mode ("default" or "search")
     if not isinstance(unicode_sentence, str):
-        raise Exception("jieba: the input parameter should string.")
+        raise Exception("jieba: the input parameter should unicode.")
     start = 0
     if mode=='default':
         for w in cut(unicode_sentence):
jieba/posseg/__init__.py
@@ -138,7 +138,7 @@ def __cut_DAG(sentence):
                     yield pair(buf,word_tag_tab.get(buf,'x'))
                     buf=''
                 else:
-                    if not (buf in jieba.FREQ):
+                    if (buf not in jieba.FREQ):
                         regognized = __cut_detail(buf)
                         for t in regognized:
                             yield t
@@ -153,7 +153,7 @@ def __cut_DAG(sentence):
         if len(buf)==1:
             yield pair(buf,word_tag_tab.get(buf,'x'))
         else:
-            if not (buf in jieba.FREQ):
+            if (buf not in jieba.FREQ):
                 regognized = __cut_detail(buf)
                 for t in regognized:
                     yield t
@@ -162,7 +162,7 @@ def __cut_DAG(sentence):
             yield pair(elem,word_tag_tab.get(elem,'x'))
 
 def __cut_internal(sentence):
-    if not ( type(sentence) is str):
+    if not isinstance(sentence, str):
         try:
             sentence = sentence.decode('utf-8')
         except:
test/parallel/test_file.py
@@ -6,14 +6,16 @@ import jieba
 jieba.enable_parallel()
 
 url = sys.argv[1]
-content = open(url,"rb").read()
-t1 = time.time()
-words = "/ ".join(jieba.cut(content))
+with open(url,"rb") as content:
+    content = content.read()
+    t1 = time.time()
+    words = "/ ".join(jieba.cut(content))
+    t2 = time.time()
+    tm_cost = t2-t1
+    print('cost',tm_cost)
+    print('speed' , len(content)/tm_cost, " bytes/second")
 
-t2 = time.time()
-tm_cost = t2-t1
+with open("1.log","wb") as log_f:
+    log_f.write(words.encode('utf-8'))
 
-log_f = open("1.log","wb")
-log_f.write(words.encode('utf-8'))
-print('speed' , len(content)/tm_cost, " bytes/second")
test/test_file.py
@@ -5,19 +5,15 @@ import jieba
 jieba.initialize()
 
 url = sys.argv[1]
-content = open(url,"rb").read()
-t1 = time.time()
-words = "/ ".join(jieba.cut(content))
-
-t2 = time.time()
-tm_cost = t2-t1
-
-log_f = open("1.log","wb")
-log_f.write(words.encode('utf-8'))
-
-log_f.write(bytes("/ ".join(words),'utf-8'))
-
-print('cost',tm_cost)
-print('speed' , len(content)/tm_cost, " bytes/second")
+with open(url,"rb") as content:
+    content = content.read()
+    t1 = time.time()
+    words = "/ ".join(jieba.cut(content))
+    t2 = time.time()
+    tm_cost = t2-t1
+    print('cost',tm_cost)
+    print('speed' , len(content)/tm_cost, " bytes/second")
+
+with open("1.log","wb") as log_f:
+    log_f.write(words.encode('utf-8'))
+    log_f.write(bytes("/ ".join(words),'utf-8'))