update CPMTokenizer #553

Merged
merged 2 commits on Jun 2, 2023

Changes from 1 commit
update CPMTokenizer
FLoutione committed Jun 2, 2023
commit 2ac0828824b5806d6eb0bcbe44d27090161745eb
40 changes: 31 additions & 9 deletions mindnlp/transforms/tokenizers/cpm_tokenizer.py
@@ -1,16 +1,19 @@
+# pylint: disable=C0301
+# pylint: disable=R0913
 """
 GPT2Tokenizer
 """
 import os
+import numpy as np
 from mindspore.dataset.text.transforms import Implementation
-from tokenizers import Tokenizer
 from mindnlp.abc import PreTrainedTokenizer
+from tokenizers import Tokenizer
 
 PRETRAINED_VOCAB_MAP = {
     "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/tokenizer.json"
 }
 
 
 class CPMTokenizer(PreTrainedTokenizer):
     """
     Tokenizer used for GPT2 text process.
@@ -23,21 +26,40 @@ class CPMTokenizer(PreTrainedTokenizer):
     pretrained_vocab_map = PRETRAINED_VOCAB_MAP
 
     def __init__(
-        self,
-        tokenizer_file=None,
-        unk_token="<|endoftext|>",
-        bos_token="<|endoftext|>",
-        eos_token="<|endoftext|>",
-        add_prefix_space=False,
-        **kwargs
+            self,
+            tokenizer_file=None,
+            bos_token="<s>",
+            eos_token="</s>",
+            unk_token="<unk>",
+            sep_token="<sep>",
+            pad_token="<pad>",
+            cls_token="<cls>",
+            mask_token="<mask>",
+            eop_token="<eop>",
+            eod_token="<eod>",
+            add_prefix_space=False,
+            **kwargs
     ):
+        super().__init__(
+            tokenizer_file=tokenizer_file,
+            unk_token=unk_token,
+            sep_token=sep_token,
+            pad_token=pad_token,
+            cls_token=cls_token,
+            mask_token=mask_token,
+            eop_token=eop_token,
+            eod_token=eod_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            add_prefix_space=add_prefix_space,
+            **kwargs)
 
         if isinstance(tokenizer_file, str):
             if not os.path.isfile(tokenizer_file):
                 raise ValueError(f"{tokenizer_file} is not a file.")
         else:
             raise ValueError(f'only support tokenizer class from mindspore or mindnlp, but got {tokenizer_file}')
 
         return_token = kwargs.pop('return_token', False)
 
         if isinstance(tokenizer_file, str):
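
The change replaces GPT-2's shared `<|endoftext|>` defaults with CPM's dedicated special tokens (`<s>`, `</s>`, `<unk>`, `<sep>`, `<pad>`, `<cls>`, `<mask>`, `<eop>`, `<eod>`) and forwards them all to `PreTrainedTokenizer`. For context, a minimal usage sketch of the updated constructor — not part of the PR; the local file path is hypothetical, and it assumes the `tokenizer.json` from `PRETRAINED_VOCAB_MAP` has already been downloaded:

```python
# Sketch only: constructing the updated CPMTokenizer after this PR.
# Assumes tokenizer.json was fetched from the PRETRAINED_VOCAB_MAP URL;
# "./tokenizer.json" below is a hypothetical local path.
from mindnlp.transforms.tokenizers.cpm_tokenizer import CPMTokenizer

tokenizer = CPMTokenizer(
    tokenizer_file="./tokenizer.json",  # must be an existing file, per the isinstance/os.path.isfile check
    bos_token="<s>",                    # explicit here for illustration; these match the new defaults
    eos_token="</s>",
    unk_token="<unk>",
    sep_token="<sep>",
    pad_token="<pad>",
    cls_token="<cls>",
    mask_token="<mask>",
    eop_token="<eop>",
    eod_token="<eod>",
)
```

Note that the constructor validates `tokenizer_file` eagerly: a string path that is not a file raises `ValueError`, as does any non-string value, so a local `tokenizer.json` is effectively required.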