Mirror of https://github.com/fxsjy/jieba.git, synced 2025-07-10 00:01:33 +08:00
fix setup.py in python2.7

parent 5704e23bbf
commit 1e20c89b66
Changelog
@@ -1,3 +1,6 @@
+2019-1-20: version 0.42.1
+1. Fixed setup.py not working under python2.7 (issue #809)
+
 2019-1-13: version 0.42
 1. Fixed a coredump on empty strings in paddle mode @JesseyXujin
 2. Fixed dropped characters when segmenting in cut_all mode @fxsjy

jieba/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import absolute_import, unicode_literals
 
-__version__ = '0.42'
+__version__ = '0.42.1'
 __license__ = 'MIT'
 
 import marshal
@@ -300,7 +300,7 @@ class Tokenizer(object):
         sentence = strdecode(sentence)
         if use_paddle and is_paddle_installed:
             # if sentence is null, it will raise core exception in paddle.
-            if sentence is None or sentence == "" or sentence == u"":
+            if sentence is None or len(sentence) == 0:
                 return
             import jieba.lac_small.predict as predict
             results = predict.get_sent(sentence)
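
The null-sentence guard is simplified rather than changed in behaviour: `sentence is None or len(sentence) == 0` catches the same empty inputs as the explicit comparisons against "" and u"", but with a single check that works for both native and unicode strings on Python 2. A minimal sketch of the check, using a hypothetical guard_empty helper (jieba itself performs this test inline in Tokenizer.cut):

# -*- coding: utf-8 -*-
# guard_empty is an illustrative stand-in for the inline check in Tokenizer.cut:
# it answers whether paddle mode should return early instead of handing an
# empty sentence to the paddle predictor.
def guard_empty(sentence):
    return sentence is None or len(sentence) == 0

assert guard_empty(None)
assert guard_empty("")           # empty native string
assert guard_empty(u"")          # empty unicode string (relevant on Python 2)
assert not guard_empty(u"结巴分词")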

setup.py
@@ -43,7 +43,7 @@ GitHub: https://github.com/fxsjy/jieba
 """
 
 setup(name='jieba',
-      version='0.42',
+      version='0.42.1',
       description='Chinese Words Segmentation Utilities',
       long_description=LONGDOC,
       author='Sun, Junyi',
@@ -71,5 +71,5 @@ setup(name='jieba',
       keywords='NLP,tokenizing,Chinese word segementation',
       packages=['jieba'],
       package_dir={'jieba':'jieba'},
-      package_data={'jieba':['*.*','finalseg/*','analyse/*','posseg/*', 'lac_small/*','lac_small/model_baseline/*']}
+      package_data={'jieba':['*.*','finalseg/*','analyse/*','posseg/*', 'lac_small/*.py','lac_small/*.dic', 'lac_small/model_baseline/*']}
 )
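
The package_data line carries the actual Python 2.7 fix: the broad 'lac_small/*' glob also matches the lac_small/model_baseline directory entry itself, which the older install machinery on Python 2.7 presumably tried to copy as a regular file and failed (issue #809); restricting the patterns to 'lac_small/*.py' and 'lac_small/*.dic' keeps only plain files, while 'lac_small/model_baseline/*' still packages the model. A hypothetical check, run from the root of a jieba source checkout, to compare what the old and new globs pick up:

# Hypothetical sanity check: compare the old and new package_data globs
# against the files under the jieba/ package directory.
import glob
import os

old_patterns = ['lac_small/*']
new_patterns = ['lac_small/*.py', 'lac_small/*.dic']

for pattern in old_patterns + new_patterns:
    matches = glob.glob(os.path.join('jieba', pattern))
    dirs = [m for m in matches if os.path.isdir(m)]
    # The old pattern includes the model_baseline directory; the new
    # patterns match plain files only.
    print('%-20s -> %d entries, %d directories' % (pattern, len(matches), len(dirs)))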