add longformer tokenizer (#502)
Yicorner authored May 13, 2023
1 parent 1a2f240 commit 05f8538
Showing 4 changed files with 139 additions and 1 deletion.
2 changes: 2 additions & 0 deletions mindnlp/models/longformer/longformer_config.py
@@ -22,6 +22,8 @@
 from typing import List, Union
 from mindnlp.abc import PreTrainedConfig

+LONGFORMER_SUPPORT_LIST = ["allenai/longformer-base-4096"]
+
 class LongformerConfig(PreTrainedConfig):
     r"""
     Example:
3 changes: 2 additions & 1 deletion mindnlp/transforms/tokenizers/__init__.py
@@ -23,7 +23,8 @@
 from .gpt_tokenizer import GPTTokenizer
 from .codegen_tokenizer import CodeGenTokenizer
 from .roberta_tokenizer import RobertaTokenizer
+from .longformer_tokenizer import LongformerTokenizer

 __all__ = ['BasicTokenizer', 'BertTokenizer', 'T5Tokenizer', 'GPTTokenizer', 'GPT2Tokenizer', 'CodeGenTokenizer',
-           'RobertaTokenizer'
+           'RobertaTokenizer', 'LongformerTokenizer'
            ]
104 changes: 104 additions & 0 deletions mindnlp/transforms/tokenizers/longformer_tokenizer.py
@@ -0,0 +1,104 @@
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
T5Tokenizer
"""

import numpy as np
from mindspore.dataset.text.transforms import Implementation
from tokenizers import Tokenizer
from mindnlp.abc import PreTrainedTokenizer
from mindnlp.models.longformer.longformer_config import LONGFORMER_SUPPORT_LIST
from mindnlp.configs import HF_TOKENIZER_CONFIG_URL_BASE

PRETRAINED_VOCAB_MAP = {
    model: HF_TOKENIZER_CONFIG_URL_BASE.format(model) for model in LONGFORMER_SUPPORT_LIST
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
}

class LongformerTokenizer(PreTrainedTokenizer):
    """
    Tokenizer used for Longformer text processing.

    Args:
        vocab (str): Path of the tokenizer file used to look up words.
        return_token (bool): Whether to return tokens. If True, return string tokens;
            if False, return token ids. Default: False.

    Examples:
        >>> from mindnlp.transforms import LongformerTokenizer
        >>> text = "Believing that faith can triumph over everything is in itself the greatest belief"
        >>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
        >>> tokens = tokenizer.encode(text)
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_map = PRETRAINED_VOCAB_MAP

    def __init__(self, vocab: str, **kwargs):
        super().__init__()
        return_token = kwargs.pop('return_token', False)

        if isinstance(vocab, str):
            self.tokenizer = Tokenizer.from_file(vocab)
        else:
            raise ValueError(f'only support a string path to a tokenizer file, but got {vocab}')
        self.return_token = return_token
        self.implementation = Implementation.PY

    def __call__(self, text_input):
        """
        Call method for input conversion in eager mode with the Python implementation.
        """
        if isinstance(text_input, str):
            text_input = np.array(text_input)
        elif not isinstance(text_input, np.ndarray):
            raise TypeError(
                f"Input should be a text line in 1-D NumPy format, got {type(text_input)}.")
        return super().__call__(text_input)

    def execute_py(self, text_input):
        """
        Execute method.
        """
        return self._execute_py(text_input)

    def _execute_py(self, text_input):
        """
        Tokenize the input and return string tokens or token ids, depending on `return_token`.
        """
        text_input = self._convert_to_unicode(text_input)
        tokens = self.tokenizer.encode(text_input)
        if self.return_token is True:
            return np.array(tokens.tokens)
        return np.array(tokens.ids)

    def _convert_to_unicode(self, text_input):
        """Converts `text_input` to Unicode (if it's not already), assuming utf-8 input."""
        if isinstance(text_input, str):
            return text_input
        if isinstance(text_input, bytes):
            return text_input.decode("utf-8", "ignore")
        if isinstance(text_input, np.ndarray):
            if text_input.dtype.type is np.bytes_:
                text_input = np.char.decode(text_input, "utf-8")
            return str(text_input)
        raise ValueError(f"Unsupported string type: {type(text_input)}")
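
For reference, a minimal eager-mode sketch of the two output modes (an illustration, assuming the allenai/longformer-base-4096 tokenizer file downloads successfully):

from mindnlp.transforms import LongformerTokenizer

sample = "Longformer scales attention to long documents."
# return_token=True yields string tokens; the default (False) yields token ids.
token_mode = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096', return_token=True)
id_mode = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
print(token_mode(sample))  # np.ndarray of subword strings
print(id_mode(sample))     # np.ndarray of integer ids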

31 changes: 31 additions & 0 deletions tests/ut/transforms/test_longformer_tokenizer.py
@@ -0,0 +1,31 @@
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test the T5Tokenizer"""

import mindspore as ms
from mindspore.dataset import GeneratorDataset
from mindnlp.transforms import LongformerTokenizer

def test_longformer_tokenizer_op():
    """test LongformerTokenizer from pretrained."""
    texts = ['i make a small mistake when i\'m working!']
    test_dataset = GeneratorDataset(texts, 'text')

    tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096', return_token=True)
    test_dataset = test_dataset.map(operations=tokenizer)
    dataset_after = next(test_dataset.create_tuple_iterator())[0]

    assert len(dataset_after) == 12
    assert dataset_after.dtype == ms.string
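
A companion check for the default id path could look like the sketch below (same download assumption; tokens and ids come from the same encoding, so the length should match the 12 tokens above, while the column dtype should no longer be string):

def test_longformer_tokenizer_ids():
    """Sketch: the default return_token=False path yields integer token ids."""
    texts = ['i make a small mistake when i\'m working!']
    test_dataset = GeneratorDataset(texts, 'text')

    tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096')
    test_dataset = test_dataset.map(operations=tokenizer)
    ids_after = next(test_dataset.create_tuple_iterator())[0]

    assert len(ids_after) == 12
    assert ids_after.dtype != ms.string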
