Commit

modified: EduNLP/SIF/tokenization/text/tokenization.py
    modified:   tests/test_tokenizer/test_tokenizer.py
KINGNEWBLUSH committed Mar 11, 2024
1 parent 025fa86 commit aea99a2
Showing 2 changed files with 22 additions and 6 deletions.
8 changes: 4 additions & 4 deletions EduNLP/SIF/tokenization/text/tokenization.py
@@ -77,7 +77,7 @@ def tokenize(text,
                 token for token in word_tokenize(text)
                 if token not in stopwords and token.strip()
             ]
-        except OSError:
+        except LookupError:
             nltk.download('punkt')
             return [
                 token for token in word_tokenize(text)
@@ -87,7 +87,7 @@ def tokenize(text,
     elif (tokenizer == 'spacy'):
         try:
             spacy_tokenizer = spacy.load(tok_model)
-        except OSError:
+        except LookupError:
             spacy.cli.download(tok_model)
             spacy_tokenizer = spacy.load(tok_model)

@@ -101,9 +101,9 @@ def tokenize(text,
             huggingface_tokenizer.models.BPE())
         try:
             tokenizer.load(bpe_json, pretty=True)
-        except OSError:
+        except LookupError:
             if (bpe_trainfile is None):
-                raise OSError("bpe train file not found, using %s." %
+                raise LookupError("bpe train file not found, using %s." %
                               bpe_trainfile)
             trainer = BpeTrainer(
                 special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
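Note on the first hunk above: NLTK reports a missing resource such as 'punkt' by raising LookupError rather than OSError, which is what the download-and-retry fallback relies on. A minimal standalone sketch of that pattern, assuming only NLTK is installed (the helper name tokenize_with_fallback is illustrative, not part of the repository):

import nltk
from nltk.tokenize import word_tokenize

def tokenize_with_fallback(text):
    try:
        return word_tokenize(text)
    except LookupError:
        # NLTK raises LookupError when the 'punkt' data is missing,
        # so download it once and retry.
        nltk.download('punkt')
        return word_tokenize(text)

print(tokenize_with_fallback("The stationery store has 600 exercise books."))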
20 changes: 18 additions & 2 deletions tests/test_tokenizer/test_tokenizer.py
@@ -61,12 +61,28 @@ def test_Tokenizer():
     ]
     for tok in ['nltk', 'spacy']:
         tokenizer = get_tokenizer("pure_text",
-                                  stop_words=set(",?"),
-                                  text_params={"tokenizer": tok})
+                                  text_params={"tokenizer": tok, "stop_words": set(",?")})
         tokens = tokenizer(items)
         ret = next(tokens)
         assert ret == ans

+def test_TokenizerBPE():
+    items = ['The stationery store has $600$ exercise books, and after selling some,\
+        there are still $4$ packs left, $25$ each, how many are sold?']
+    ans = [
+        ['h', 'e', ' ', 'st', 'at', 'io', 'n', 'er', 'y', ' ', 'st', 'o', 're', ' ',
+         'h', 'as', ' $', '6', '00', '$ ', 'e', 'x', 'er', 'ci', 's', 'e', ' b', 'o',
+         'o', 'k', 's', ', ', 'an', 'd', ' a', 'ft', 'er', ' ', 's', 'e', 'l', 'l',
+         'in', 'g', ' ', 's', 'ome', ', ', 't', 'h', 'e', 're', ' ', 'are', ' ',
+         'st', 'i', 'l', 'l', ' $', '4', '$ ', 'p', 'a', 'c', 'k', 's', ' ', 'left',
+         ', ', '$', '25', '$ ', 'e', 'a', 'c', 'h', ', ', 'h', 'ow', ' m', 'an', 'y',
+         ' ', 'are', ' ', 's', 'o', 'l', 'd']
+    ]
+    tokenizer = get_tokenizer("pure_text",
+                              text_params={"tokenizer": 'bpe', "bpe_trainfile": "../../static/test_data/standard_luna_data.json", "stop_words": set(",?")})
+    tokens = tokenizer(items)
+    ret = next(tokens)
+    assert ret == ans

 def test_SpaceTokenizer():
     items = ['文具店有 $600$ 本练习本,卖出一些后,还剩 $4$ 包,每包 $25$ 本,卖出多少本?']
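For readers unfamiliar with the 'bpe' branch that test_TokenizerBPE exercises, a minimal sketch of the underlying huggingface tokenizers workflow: build a BPE model, train it on a plain-text corpus, and encode a sentence. The corpus file name below is hypothetical, not the repository's standard_luna_data.json; note that without a pre-tokenizer, learned merges may span spaces, which is consistent with tokens such as ' $' in the expected output above.

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer

# Build an untrained BPE tokenizer with an explicit unknown token.
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
trainer = BpeTrainer(
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# Train on a hypothetical plain-text corpus file, then encode a sample sentence.
tokenizer.train(files=["corpus.txt"], trainer=trainer)
print(tokenizer.encode("The stationery store has 600 exercise books.").tokens)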
