rewrite QuerySegment, make Jieba::CutForSearch behave the same as [jieba]'s cut_for_search API

remove Jieba::SetQuerySegmentThreshold
This commit is contained in:
yanyiwu 2016-05-02 16:18:36 +08:00
parent 3f0faec14b
commit 5ac9e48eb0
7 changed files with 76 additions and 76 deletions
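Usage is unchanged apart from dropping the threshold call. A minimal caller-side sketch (the dictionary paths, include layout, and sample sentence are illustrative, not taken from this commit):

```cpp
#include <iostream>
#include <string>
#include <vector>
#include "cppjieba/Jieba.hpp"

int main() {
  // Illustrative paths: the stock dictionaries shipped in dict/; adjust to your layout.
  cppjieba::Jieba jieba("../dict/jieba.dict.utf8",
                        "../dict/hmm_model.utf8",
                        "../dict/user.dict.utf8");

  // jieba.SetQuerySegmentThreshold(3);  // removed by this commit; nothing replaces it

  std::vector<std::string> words;
  jieba.CutForSearch("小明硕士毕业于中国科学院计算所,后在日本京都大学深造", words);

  // Expected to match [jieba] cut_for_search:
  // 小明/硕士/毕业/于/中国/科学/学院/科学院/中国科学院/计算/计算所/,/后/在/日本/京都/大学/日本京都大学/深造
  std::cout << limonp::Join(words.begin(), words.end(), "/") << std::endl;
  return 0;
}
```

After the rewrite, search-mode granularity follows the dictionary contents rather than a per-instance word-length threshold, so there is nothing to configure in its place.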

View File

@@ -1,5 +1,10 @@
# CppJieba ChangeLog
## next version
+ rewrite QuerySegment, make `Jieba::CutForSearch` behave the same as [jieba]'s `cut_for_search` API
+ remove `Jieba::SetQuerySegmentThreshold`
## v4.7.0
api changes:
@@ -216,3 +221,4 @@ upgrade:
[husky]:http://github.com/yanyiwu/husky.git
[issue50]:https://github.com/yanyiwu/cppjieba/issues/50
[qinwf]:https://github.com/yanyiwu/cppjieba/pull/53#issuecomment-176264929
[jieba]:https://github.com/fxsjy/jieba

View File

@@ -55,18 +55,20 @@ make test
```
[demo] Cut With HMM
我/是/拖拉机/学院/手扶拖拉机/专业/的/。/不用/多久/,/我/就/会/升职/加薪/,/当上/CEO/,/走上/人生/巅峰/。
他/来到/了/网易/杭研/大厦
[demo] Cut Without HMM
我/是/拖拉机/学院/手扶拖拉机/专业/的/。/不用/多久/,/我/就/会/升职/加薪/,/当/上/C/E/O/,/走上/人生/巅峰/。
他/来到/了/网易/杭/研/大厦
我来到北京清华大学
[demo] CutAll
我/是/拖拉/拖拉机/学院/手扶/手扶拖拉机/拖拉/拖拉机/专业/的/。/不用/多久/,/我/就/会升/升职/加薪/,/当上/C/E/O/,/走上/人生/巅峰/。
我/来到/北京/清华/清华大学/华大/大学
小明硕士毕业于中国科学院计算所,后在日本京都大学深造
[demo] CutForSearch
我/是/拖拉机/学院/手扶/手扶拖拉机/拖拉/拖拉机/专业/的/。/不用/多久/,/我/就/会/升职/加薪/,/当上/CEO/,/走上/人生/巅峰/。
小明/硕士/毕业/于/中国/科学/学院/科学院/中国科学院/计算/计算所/,/后/在/日本/京都/大学/日本京都大学/深造
[demo] Insert User Word
男默/女泪
男默女泪
[demo] CutForSearch Word With Offset
[{"word": "南京市", "offset": 0}, {"word": "长江", "offset": 9}, {"word": "长江大桥", "offset": 9}, {"word": "大桥", "offset": 15}]
[{"word": "小明", "offset": 0}, {"word": "硕士", "offset": 6}, {"word": "毕业", "offset": 12}, {"word": "于", "offset": 18}, {"word": "中国", "offset": 21}, {"word": "科学", "offset": 27}, {"word": "学院", "offset": 30}, {"word": "科学院", "offset": 27}, {"word": "中国科学院", "offset": 21}, {"word": "计算", "offset": 36}, {"word": "计算所", "offset": 36}, {"word": "", "offset": 45}, {"word": "后", "offset": 48}, {"word": "在", "offset": 51}, {"word": "日本", "offset": 54}, {"word": "京都", "offset": 60}, {"word": "大学", "offset": 66}, {"word": "日本京都大学", "offset": 54}, {"word": "深造", "offset": 72}]
[demo] Tagging
我是拖拉机学院手扶拖拉机专业的。不用多久,我就会升职加薪,当上CEO,走上人生巅峰。
[我:r, 是:v, 拖拉机:n, 学院:n, 手扶拖拉机:n, 专业:n, 的:uj, 。:x, 不用:v, 多久:m, ,:x, 我:r, 就:d, 会:v, 升职:v, 加薪:nr, ,:x, 当上:t, CEO:eng, ,:x, 走上:v, 人生:n, 巅峰:n, 。:x]

View File

@@ -47,18 +47,20 @@ Output:
```
[demo] Cut With HMM
我/是/拖拉机/学院/手扶拖拉机/专业/的/。/不用/多久/,/我/就/会/升职/加薪/,/当上/CEO/,/走上/人生/巅峰/。
他/来到/了/网易/杭研/大厦
[demo] Cut Without HMM
我/是/拖拉机/学院/手扶拖拉机/专业/的/。/不用/多久/,/我/就/会/升职/加薪/,/当/上/C/E/O/,/走上/人生/巅峰/。
他/来到/了/网易/杭/研/大厦
我来到北京清华大学
[demo] CutAll
我/是/拖拉/拖拉机/学院/手扶/手扶拖拉机/拖拉/拖拉机/专业/的/。/不用/多久/,/我/就/会升/升职/加薪/,/当上/C/E/O/,/走上/人生/巅峰/。
我/来到/北京/清华/清华大学/华大/大学
小明硕士毕业于中国科学院计算所,后在日本京都大学深造
[demo] CutForSearch
我/是/拖拉机/学院/手扶/手扶拖拉机/拖拉/拖拉机/专业/的/。/不用/多久/,/我/就/会/升职/加薪/,/当上/CEO/,/走上/人生/巅峰/。
小明/硕士/毕业/于/中国/科学/学院/科学院/中国科学院/计算/计算所/,/后/在/日本/京都/大学/日本京都大学/深造
[demo] Insert User Word
男默/女泪
男默女泪
[demo] CutForSearch Word With Offset
[{"word": "南京市", "offset": 0}, {"word": "长江", "offset": 9}, {"word": "长江大桥", "offset": 9}, {"word": "大桥", "offset": 15}]
[{"word": "小明", "offset": 0}, {"word": "硕士", "offset": 6}, {"word": "毕业", "offset": 12}, {"word": "于", "offset": 18}, {"word": "中国", "offset": 21}, {"word": "科学", "offset": 27}, {"word": "学院", "offset": 30}, {"word": "科学院", "offset": 27}, {"word": "中国科学院", "offset": 21}, {"word": "计算", "offset": 36}, {"word": "计算所", "offset": 36}, {"word": "", "offset": 45}, {"word": "后", "offset": 48}, {"word": "在", "offset": 51}, {"word": "日本", "offset": 54}, {"word": "京都", "offset": 60}, {"word": "大学", "offset": 66}, {"word": "日本京都大学", "offset": 54}, {"word": "深造", "offset": 72}]
[demo] Tagging
我是拖拉机学院手扶拖拉机专业的。不用多久,我就会升职加薪,当上CEO,走上人生巅峰。
[我:r, 是:v, 拖拉机:n, 学院:n, 手扶拖拉机:n, 专业:n, 的:uj, 。:x, 不用:v, 多久:m, ,:x, 我:r, 就:d, 会:v, 升职:v, 加薪:nr, ,:x, 当上:t, CEO:eng, ,:x, 走上:v, 人生:n, 巅峰:n, 。:x]

View File

@@ -74,9 +74,6 @@ class Jieba {
return &model_;
}
void SetQuerySegmentThreshold(size_t len) {
query_seg_.SetMaxWordLen(len);
}
private:
DictTrie dict_trie_;
HMMModel model_;

View File

@@ -15,14 +15,12 @@
namespace cppjieba {
class QuerySegment: public SegmentBase {
public:
QuerySegment(const string& dict, const string& model, const string& userDict = "", size_t maxWordLen = 4)
QuerySegment(const string& dict, const string& model, const string& userDict = "")
: mixSeg_(dict, model, userDict),
fullSeg_(mixSeg_.GetDictTrie()),
maxWordLen_(maxWordLen) {
assert(maxWordLen_);
trie_(mixSeg_.GetDictTrie()) {
}
QuerySegment(const DictTrie* dictTrie, const HMMModel* model, size_t maxWordLen = 4)
: mixSeg_(dictTrie, model), fullSeg_(dictTrie), maxWordLen_(maxWordLen) {
QuerySegment(const DictTrie* dictTrie, const HMMModel* model)
: mixSeg_(dictTrie, model), trie_(dictTrie) {
}
~QuerySegment() {
}
@@ -51,26 +49,25 @@ class QuerySegment: public SegmentBase {
vector<WordRange> fullRes;
for (vector<WordRange>::const_iterator mixResItr = mixRes.begin(); mixResItr != mixRes.end(); mixResItr++) {
// if it's too long, Cut with fullSeg_, put fullRes in res
if (mixResItr->Length() > maxWordLen_ && !mixResItr->IsAllAscii()) {
fullSeg_.Cut(mixResItr->left, mixResItr->right + 1, fullRes);
for (vector<WordRange>::const_iterator fullResItr = fullRes.begin(); fullResItr != fullRes.end(); fullResItr++) {
res.push_back(*fullResItr);
if (mixResItr->Length() > 2) {
for (size_t i = 0; i + 1 < mixResItr->Length(); i++) {
WordRange wr(mixResItr->left + i, mixResItr->left + i + 1);
if (trie_->Find(wr.left, wr.right + 1) != NULL) {
res.push_back(wr);
}
}
}
if (mixResItr->Length() > 3) {
for (size_t i = 0; i + 2 < mixResItr->Length(); i++) {
WordRange wr(mixResItr->left + i, mixResItr->left + i + 2);
if (trie_->Find(wr.left, wr.right + 1) != NULL) {
res.push_back(wr);
}
}
}
//clear tmp res
fullRes.clear();
} else { // just use the mix result
res.push_back(*mixResItr);
}
}
}
void SetMaxWordLen(size_t len) {
maxWordLen_ = len;
}
size_t GetMaxWordLen() const {
return maxWordLen_;
}
private:
bool IsAllAscii(const Unicode& s) const {
for(size_t i = 0; i < s.size(); i++) {
@@ -81,8 +78,7 @@ class QuerySegment: public SegmentBase {
return true;
}
MixSegment mixSeg_;
FullSegment fullSeg_;
size_t maxWordLen_;
const DictTrie* trie_;
}; // QuerySegment
} // namespace cppjieba
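In plain terms, the rewritten `Cut` mirrors [jieba]'s `cut_for_search`: after the mix segmentation, every word longer than two runes also yields each 2-rune substring found in the dictionary, every word longer than three runes also yields each 3-rune substring found in the dictionary, and the original word is appended last. A self-contained sketch of that expansion step, using `std::u32string` for runes and a `std::set` as a stand-in for `DictTrie` (both are simplifications, not code from this commit):

```cpp
#include <set>
#include <string>
#include <vector>

// Illustrative stand-in for DictTrie::Find(); a real dictionary lookup would be used instead.
static bool InDict(const std::set<std::u32string>& dict, const std::u32string& w) {
  return dict.count(w) != 0;
}

// Expand mix-segmentation output the way the rewritten QuerySegment::Cut does:
// dictionary sub-words first, the original word last. Each u32string element
// approximates one rune (Unicode code point).
std::vector<std::u32string> ExpandForSearch(const std::vector<std::u32string>& mixRes,
                                            const std::set<std::u32string>& dict) {
  std::vector<std::u32string> res;
  for (const std::u32string& w : mixRes) {
    if (w.size() > 2) {  // words longer than 2 runes: emit 2-rune sub-words found in the dict
      for (size_t i = 0; i + 1 < w.size(); i++) {
        std::u32string gram2 = w.substr(i, 2);
        if (InDict(dict, gram2)) res.push_back(gram2);
      }
    }
    if (w.size() > 3) {  // words longer than 3 runes: also emit 3-rune sub-words found in the dict
      for (size_t i = 0; i + 2 < w.size(); i++) {
        std::u32string gram3 = w.substr(i, 3);
        if (InDict(dict, gram3)) res.push_back(gram3);
      }
    }
    res.push_back(w);    // the original word always comes last
  }
  return res;
}
```

For 中国科学院 with a dictionary containing 中国, 科学, 学院, 科学院 and 中国科学院, this yields 中国/科学/学院/科学院/中国科学院, matching the updated unit-test expectation below.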

View File

@@ -15,9 +15,11 @@ int main(int argc, char** argv) {
USER_DICT_PATH);
vector<string> words;
vector<cppjieba::Word> jiebawords;
string s;
string result;
string s = "我是拖拉机学院手扶拖拉机专业的。不用多久我就会升职加薪当上CEO走上人生巅峰。";
s = "他来到了网易杭研大厦";
cout << s << endl;
cout << "[demo] Cut With HMM" << endl;
jieba.Cut(s, words, true);
cout << limonp::Join(words.begin(), words.end(), "/") << endl;
@@ -26,10 +28,14 @@ int main(int argc, char** argv) {
jieba.Cut(s, words, false);
cout << limonp::Join(words.begin(), words.end(), "/") << endl;
s = "我来到北京清华大学";
cout << s << endl;
cout << "[demo] CutAll" << endl;
jieba.CutAll(s, words);
cout << limonp::Join(words.begin(), words.end(), "/") << endl;
s = "小明硕士毕业于中国科学院计算所,后在日本京都大学深造";
cout << s << endl;
cout << "[demo] CutForSearch" << endl;
jieba.CutForSearch(s, words);
cout << limonp::Join(words.begin(), words.end(), "/") << endl;
@@ -42,12 +48,12 @@ int main(int argc, char** argv) {
cout << limonp::Join(words.begin(), words.end(), "/") << endl;
cout << "[demo] CutForSearch Word With Offset" << endl;
jieba.SetQuerySegmentThreshold(3);
jieba.CutForSearch("南京市长江大桥", jiebawords, true);
jieba.CutForSearch(s, jiebawords, true);
cout << jiebawords << endl;
cout << "[demo] Tagging" << endl;
vector<pair<string, string> > tagres;
s = "我是拖拉机学院手扶拖拉机专业的。不用多久我就会升职加薪当上CEO走上人生巅峰。";
jieba.Tag(s, tagres);
cout << s << endl;
cout << tagres << endl;

View File

@@ -197,61 +197,52 @@ TEST(FullSegment, Test1) {
}
TEST(QuerySegment, Test1) {
QuerySegment segment("../test/testdata/extra_dict/jieba.dict.small.utf8", "../dict/hmm_model.utf8", "", 3);
const char* str = "小明硕士毕业于中国科学院计算所,后在日本京都大学深造";
QuerySegment segment("../dict/jieba.dict.utf8", "../dict/hmm_model.utf8", "");
vector<string> words;
segment.Cut(str, words);
string s1, s2;
s1 << words;
s2 = "[\"小明\", \"硕士\", \"毕业\", \"\", \"中国\", \"中国科学院\", \"科学\", \"科学院\", \"学院\", \"计算所\", \"\", \"\", \"\", \"日本\", \"京都\", \"京都大学\", \"大学\", \"深造\"]";
segment.Cut("小明硕士毕业于中国科学院计算所,后在日本京都大学深造", words);
s1 = Join(words.begin(), words.end(), "/");
s2 = "小明/硕士/毕业/于/中国/科学/学院/科学院/中国科学院/计算/计算所//后/在/日本/京都/大学/日本京都大学/深造";
ASSERT_EQ(s1, s2);
segment.Cut("亲口交代", words);
s1 = Join(words.begin(), words.end(), "/");
s2 = "亲口/交代";
ASSERT_EQ(s1, s2);
segment.Cut("他心理健康", words);
s1 = Join(words.begin(), words.end(), "/");
s2 = "他/心理/健康/心理健康";
ASSERT_EQ(s1, s2);
}
TEST(QuerySegment, Test2) {
QuerySegment segment("../test/testdata/extra_dict/jieba.dict.small.utf8", "../dict/hmm_model.utf8", "../test/testdata/userdict.utf8|../test/testdata/userdict.english", 3);
QuerySegment segment("../test/testdata/extra_dict/jieba.dict.small.utf8", "../dict/hmm_model.utf8", "../test/testdata/userdict.utf8|../test/testdata/userdict.english");
vector<string> words;
string s1, s2;
{
const char* str = "小明硕士毕业于中国科学院计算所,后在日本京都大学深造";
vector<string> words;
segment.Cut(str, words);
string s1, s2;
s1 << words;
s2 = "[\"小明\", \"硕士\", \"毕业\", \"\", \"中国\", \"中国科学院\", \"科学\", \"科学院\", \"学院\", \"计算所\", \"\", \"\", \"\", \"日本\", \"京都\", \"京都大学\", \"大学\", \"深造\"]";
segment.Cut("小明硕士毕业于中国科学院计算所,后在日本京都大学深造", words);
s1 = Join(words.begin(), words.end(), "/");
s2 = "小明/硕士/毕业/于/中国/科学/学院/科学院/中国科学院/计算/计算所//后/在/日本/京都/大学/京都大学/深造";
ASSERT_EQ(s1, s2);
}
{
const char* str = "小明硕士毕业于中国科学院计算所iPhone6";
vector<string> words;
segment.Cut(str, words);
string s1, s2;
s1 << words;
s2 = "[\"小明\", \"硕士\", \"毕业\", \"\", \"中国\", \"中国科学院\", \"科学\", \"科学院\", \"学院\", \"计算所\", \"iPhone6\"]";
segment.Cut("小明硕士毕业于中国科学院计算所iPhone6", words);
s1 = Join(words.begin(), words.end(), "/");
s2 = "小明/硕士/毕业/于/中国/科学/学院/科学院/中国科学院/计算/计算所/iPhone6";
ASSERT_EQ(s1, s2);
}
{
vector<string> words;
segment.Cut("internal", words);
string s = Join(words.begin(), words.end(), "/");
ASSERT_EQ("internal", s);
}
segment.SetMaxWordLen(5);
{
vector<string> words;
segment.Cut("中国科学院", words);
string s = Join(words.begin(), words.end(), "/");
ASSERT_EQ("中国科学院", s);
s1 = Join(words.begin(), words.end(), "/");
s2 = "中国/科学/学院/科学院/中国科学院";
ASSERT_EQ(s1, s2);
}
}
TEST(MPSegmentTest, Unicode32) {