From 953e8ffbfaf81390db621156900ee76db1d6d4a8 Mon Sep 17 00:00:00 2001 From: suluyan Date: Thu, 16 Jun 2022 19:34:36 +0800 Subject: [PATCH 1/7] fill mask --- modelscope/models/nlp/__init__.py | 1 + .../models/nlp/masked_language_model.py | 43 +++++++++ modelscope/pipelines/builder.py | 2 + modelscope/pipelines/nlp/__init__.py | 1 + .../pipelines/nlp/fill_mask_pipeline.py | 57 ++++++++++++ modelscope/preprocessors/nlp.py | 62 ++++++++++++- modelscope/utils/constant.py | 2 +- tests/pipelines/test_fill_mask.py | 87 +++++++++++++++++++ 8 files changed, 253 insertions(+), 2 deletions(-) create mode 100644 modelscope/models/nlp/masked_language_model.py create mode 100644 modelscope/pipelines/nlp/fill_mask_pipeline.py create mode 100644 tests/pipelines/test_fill_mask.py diff --git a/modelscope/models/nlp/__init__.py b/modelscope/models/nlp/__init__.py index be675c1b..2c6c6ba2 100644 --- a/modelscope/models/nlp/__init__.py +++ b/modelscope/models/nlp/__init__.py @@ -1,3 +1,4 @@ from .sentence_similarity_model import * # noqa F403 from .sequence_classification_model import * # noqa F403 from .text_generation_model import * # noqa F403 +from .masked_language_model import * # noqa F403 diff --git a/modelscope/models/nlp/masked_language_model.py b/modelscope/models/nlp/masked_language_model.py new file mode 100644 index 00000000..76677040 --- /dev/null +++ b/modelscope/models/nlp/masked_language_model.py @@ -0,0 +1,43 @@ +from typing import Any, Dict, Optional, Union + +import numpy as np +from ..base import Model, Tensor +from ..builder import MODELS +from ...utils.constant import Tasks + +__all__ = ['MaskedLanguageModel'] + + +@MODELS.register_module(Tasks.fill_mask, module_name=r'sbert') +class MaskedLanguageModel(Model): + + def __init__(self, model_dir: str, *args, **kwargs): + from sofa.utils.backend import AutoConfig, AutoModelForMaskedLM + self.model_dir = model_dir + super().__init__(model_dir, *args, **kwargs) + + self.config = AutoConfig.from_pretrained(model_dir) + 
self.model = AutoModelForMaskedLM.from_pretrained(model_dir, config=self.config) + + + def forward(self, inputs: Dict[str, Tensor]) -> Dict[str, np.ndarray]: + """return the result by the model + + Args: + input (Dict[str, Any]): the preprocessed data + + Returns: + Dict[str, np.ndarray]: results + Example: + { + 'predictions': array([1]), # lable 0-negative 1-positive + 'probabilities': array([[0.11491239, 0.8850876 ]], dtype=float32), + 'logits': array([[-0.53860897, 1.5029076 ]], dtype=float32) # true value + } + """ + rst = self.model( + input_ids=inputs["input_ids"], + attention_mask=inputs['attention_mask'], + token_type_ids=inputs["token_type_ids"] + ) + return {'logits': rst['logits'], 'input_ids': inputs['input_ids']} diff --git a/modelscope/pipelines/builder.py b/modelscope/pipelines/builder.py index d4ad0c3f..38172c99 100644 --- a/modelscope/pipelines/builder.py +++ b/modelscope/pipelines/builder.py @@ -24,6 +24,8 @@ DEFAULT_MODEL_FOR_PIPELINE = { Tasks.image_generation: ('person-image-cartoon', 'damo/cv_unet_person-image-cartoon_compound-models'), + Tasks.fill_mask: + ('sbert') } diff --git a/modelscope/pipelines/nlp/__init__.py b/modelscope/pipelines/nlp/__init__.py index 1f15a7b8..3151b138 100644 --- a/modelscope/pipelines/nlp/__init__.py +++ b/modelscope/pipelines/nlp/__init__.py @@ -1,3 +1,4 @@ from .sentence_similarity_pipeline import * # noqa F403 from .sequence_classification_pipeline import * # noqa F403 from .text_generation_pipeline import * # noqa F403 +from .fill_mask_pipeline import * # noqa F403 diff --git a/modelscope/pipelines/nlp/fill_mask_pipeline.py b/modelscope/pipelines/nlp/fill_mask_pipeline.py new file mode 100644 index 00000000..ac22cf3b --- /dev/null +++ b/modelscope/pipelines/nlp/fill_mask_pipeline.py @@ -0,0 +1,57 @@ +from typing import Dict + +from modelscope.models.nlp import MaskedLanguageModel +from modelscope.preprocessors import FillMaskPreprocessor +from modelscope.utils.constant import Tasks +from ..base import 
Pipeline, Tensor +from ..builder import PIPELINES + +__all__ = ['FillMaskPipeline'] + + +@PIPELINES.register_module(Tasks.fill_mask, module_name=r'sbert') +class FillMaskPipeline(Pipeline): + + def __init__(self, model: MaskedLanguageModel, + preprocessor: FillMaskPreprocessor, **kwargs): + """use `model` and `preprocessor` to create a nlp text classification pipeline for prediction + + Args: + model (SequenceClassificationModel): a model instance + preprocessor (SequenceClassificationPreprocessor): a preprocessor instance + """ + + super().__init__(model=model, preprocessor=preprocessor, **kwargs) + self.preprocessor = preprocessor + self.tokenizer = preprocessor.tokenizer + self.mask_id = {'veco': 250001, 'sbert': 103} + + def postprocess(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]: + """process the prediction results + + Args: + inputs (Dict[str, Any]): _description_ + + Returns: + Dict[str, str]: the prediction results + """ + import numpy as np + logits = inputs["logits"].detach().numpy() + input_ids = inputs["input_ids"].detach().numpy() + pred_ids = np.argmax(logits, axis=-1) + rst_ids = np.where(input_ids==self.mask_id[self.model.config.model_type], pred_ids, input_ids) + pred_strings = [] + for ids in rst_ids: + if self.model.config.model_type == 'veco': + pred_string = self.tokenizer.decode(ids).split('')[0].replace("", "").replace("", "").replace("", "") + elif self.model.config.vocab_size == 21128: # zh bert + pred_string = self.tokenizer.convert_ids_to_tokens(ids) + pred_string = ''.join(pred_string).replace('##','') + pred_string = pred_string.split('[SEP]')[0].replace('[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') + else: ## en bert + pred_string = self.tokenizer.decode(ids) + pred_string = pred_string.split('[SEP]')[0].replace('[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') + pred_strings.append(pred_string) + + return {'pred_string': pred_strings} + diff --git a/modelscope/preprocessors/nlp.py 
b/modelscope/preprocessors/nlp.py index 6773eadf..952e7b63 100644 --- a/modelscope/preprocessors/nlp.py +++ b/modelscope/preprocessors/nlp.py @@ -12,7 +12,8 @@ from .builder import PREPROCESSORS __all__ = [ 'Tokenize', 'SequenceClassificationPreprocessor', - 'TextGenerationPreprocessor' + 'TextGenerationPreprocessor', + 'FillMaskPreprocessor' ] @@ -166,8 +167,67 @@ class TextGenerationPreprocessor(Preprocessor): truncation=True, max_length=max_seq_length) + rst['input_ids'].append(feature['input_ids']) + rst['attention_mask'].append(feature['attention_mask']) + rst['token_type_ids'].append(feature['token_type_ids']) + return {k: torch.tensor(v) for k, v in rst.items()} + + +@PREPROCESSORS.register_module( + Fields.nlp, module_name=r'sbert') +class FillMaskPreprocessor(Preprocessor): + + def __init__(self, model_dir: str, *args, **kwargs): + """preprocess the data via the vocab.txt from the `model_dir` path + + Args: + model_dir (str): model path + """ + super().__init__(*args, **kwargs) + from sofa.utils.backend import AutoTokenizer + self.model_dir = model_dir + self.first_sequence: str = kwargs.pop('first_sequence', + 'first_sequence') + self.sequence_length = kwargs.pop('sequence_length', 128) + + self.tokenizer = AutoTokenizer.from_pretrained(model_dir) + + @type_assert(object, str) + def __call__(self, data: str) -> Dict[str, Any]: + """process the raw input data + + Args: + data (str): a sentence + Example: + 'you are so handsome.' 
+ + Returns: + Dict[str, Any]: the preprocessed data + """ + import torch + + new_data = {self.first_sequence: data} + # preprocess the data for the model input + + rst = { + 'input_ids': [], + 'attention_mask': [], + 'token_type_ids': [] + } + + max_seq_length = self.sequence_length + + text_a = new_data[self.first_sequence] + feature = self.tokenizer( + text_a, + padding='max_length', + truncation=True, + max_length=max_seq_length, + return_token_type_ids=True) + rst['input_ids'].append(feature['input_ids']) rst['attention_mask'].append(feature['attention_mask']) rst['token_type_ids'].append(feature['token_type_ids']) return {k: torch.tensor(v) for k, v in rst.items()} + diff --git a/modelscope/utils/constant.py b/modelscope/utils/constant.py index 6ce835c5..3bc4548b 100644 --- a/modelscope/utils/constant.py +++ b/modelscope/utils/constant.py @@ -42,7 +42,7 @@ class Tasks(object): table_question_answering = 'table-question-answering' feature_extraction = 'feature-extraction' sentence_similarity = 'sentence-similarity' - fill_mask = 'fill-mask ' + fill_mask = 'fill-mask' summarization = 'summarization' question_answering = 'question-answering' diff --git a/tests/pipelines/test_fill_mask.py b/tests/pipelines/test_fill_mask.py new file mode 100644 index 00000000..12f7ff3b --- /dev/null +++ b/tests/pipelines/test_fill_mask.py @@ -0,0 +1,87 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+import os +import shutil +import unittest + +from maas_hub.snapshot_download import snapshot_download + +from modelscope.models.nlp import MaskedLanguageModel +from modelscope.pipelines import FillMaskPipeline, pipeline +from modelscope.preprocessors import FillMaskPreprocessor +from modelscope.utils.constant import Tasks +from modelscope.models import Model +from modelscope.utils.hub import get_model_cache_dir +from modelscope.utils.test_utils import test_level + +class FillMaskTest(unittest.TestCase): + model_id_sbert = {'zh': 'damo/nlp_structbert_fill-mask-chinese_large', + 'en': 'damo/nlp_structbert_fill-mask-english_large'} + model_id_veco = 'damo/nlp_veco_fill-mask_large' + + ori_texts = {"zh": "段誉轻挥折扇,摇了摇头,说道:“你师父是你的师父,你师父可不是我的师父。你师父差得动你,你师父可差不动我。", + "en": "Everything in what you call reality is really just a reflection of your consciousness. Your whole universe is just a mirror reflection of your story."} + + test_inputs = {"zh": "段誉轻[MASK]折扇,摇了摇[MASK],[MASK]道:“你师父是你的[MASK][MASK],你师父可不是[MASK]的师父。你师父差得动你,你师父可[MASK]不动我。", + "en": "Everything in [MASK] you call reality is really [MASK] a reflection of your [MASK]. 
Your whole universe is just a mirror [MASK] of your story."} + + #def test_run(self): + # # sbert + # for language in ["zh", "en"]: + # model_dir = snapshot_download(self.model_id_sbert[language]) + # preprocessor = FillMaskPreprocessor( + # model_dir, first_sequence='sentence', second_sequence=None) + # model = MaskedLanguageModel(model_dir) + # pipeline1 = FillMaskPipeline(model, preprocessor) + # pipeline2 = pipeline( + # Tasks.fill_mask, model=model, preprocessor=preprocessor) + # ori_text = self.ori_texts[language] + # test_input = self.test_inputs[language] + # print( + # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: {pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' + # ) + + ## veco + #model_dir = snapshot_download(self.model_id_veco) + #preprocessor = FillMaskPreprocessor( + # model_dir, first_sequence='sentence', second_sequence=None) + #model = MaskedLanguageModel(model_dir) + #pipeline1 = FillMaskPipeline(model, preprocessor) + #pipeline2 = pipeline( + # Tasks.fill_mask, model=model, preprocessor=preprocessor) + #for language in ["zh", "en"]: + # ori_text = self.ori_texts[language] + # test_input = self.test_inputs["zh"].replace("[MASK]", "") + # print( + # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: {pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' + + + def test_run_with_model_from_modelhub(self): + for language in ["zh"]: + print(self.model_id_sbert[language]) + model = Model.from_pretrained(self.model_id_sbert[language]) + print("model", model.model_dir) + preprocessor = FillMaskPreprocessor( + model.model_dir, first_sequence='sentence', second_sequence=None) + pipeline_ins = pipeline( + task=Tasks.fill_mask, model=model, preprocessor=preprocessor) + print(pipeline_ins(self_test_inputs[language])) + + + #def test_run_with_model_name(self): + ## veco + #pipeline_ins = pipeline( + # task=Tasks.fill_mask, model=self.model_id_veco) + #for language in ["zh", "en"]: + # input_ = 
self.test_inputs[language].replace("[MASK]", "") + # print(pipeline_ins(input_)) + + ## structBert + #for language in ["zh"]: + # pipeline_ins = pipeline( + # task=Tasks.fill_mask, model=self.model_id_sbert[language]) + # print(pipeline_ins(self_test_inputs[language])) + + +if __name__ == '__main__': + unittest.main() + From 951077c729b71f5d245f42dcec82a7a65a028a96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=BA=E4=B8=9E?= Date: Thu, 16 Jun 2022 20:23:15 +0800 Subject: [PATCH 2/7] registry multi models on model and pipeline --- .../models/nlp/masked_language_model.py | 17 ++-- modelscope/pipelines/builder.py | 4 +- .../pipelines/nlp/fill_mask_pipeline.py | 29 +++--- tests/pipelines/test_fill_mask.py | 98 +++++++++++-------- 4 files changed, 87 insertions(+), 61 deletions(-) diff --git a/modelscope/models/nlp/masked_language_model.py b/modelscope/models/nlp/masked_language_model.py index 76677040..cb12a4dd 100644 --- a/modelscope/models/nlp/masked_language_model.py +++ b/modelscope/models/nlp/masked_language_model.py @@ -1,14 +1,16 @@ from typing import Any, Dict, Optional, Union import numpy as np + +from ...utils.constant import Tasks from ..base import Model, Tensor from ..builder import MODELS -from ...utils.constant import Tasks __all__ = ['MaskedLanguageModel'] @MODELS.register_module(Tasks.fill_mask, module_name=r'sbert') +@MODELS.register_module(Tasks.fill_mask, module_name=r'veco') class MaskedLanguageModel(Model): def __init__(self, model_dir: str, *args, **kwargs): @@ -17,8 +19,8 @@ class MaskedLanguageModel(Model): super().__init__(model_dir, *args, **kwargs) self.config = AutoConfig.from_pretrained(model_dir) - self.model = AutoModelForMaskedLM.from_pretrained(model_dir, config=self.config) - + self.model = AutoModelForMaskedLM.from_pretrained( + model_dir, config=self.config) def forward(self, inputs: Dict[str, Tensor]) -> Dict[str, np.ndarray]: """return the result by the model @@ -35,9 +37,8 @@ class MaskedLanguageModel(Model): 'logits': 
array([[-0.53860897, 1.5029076 ]], dtype=float32) # true value } """ - rst = self.model( - input_ids=inputs["input_ids"], - attention_mask=inputs['attention_mask'], - token_type_ids=inputs["token_type_ids"] - ) + rst = self.model( + input_ids=inputs['input_ids'], + attention_mask=inputs['attention_mask'], + token_type_ids=inputs['token_type_ids']) return {'logits': rst['logits'], 'input_ids': inputs['input_ids']} diff --git a/modelscope/pipelines/builder.py b/modelscope/pipelines/builder.py index 38172c99..dc7b6aa6 100644 --- a/modelscope/pipelines/builder.py +++ b/modelscope/pipelines/builder.py @@ -24,8 +24,8 @@ DEFAULT_MODEL_FOR_PIPELINE = { Tasks.image_generation: ('person-image-cartoon', 'damo/cv_unet_person-image-cartoon_compound-models'), - Tasks.fill_mask: - ('sbert') + Tasks.fill_mask: ('sbert', 'damo/nlp_structbert_fill-mask_chinese-large'), + Tasks.fill_mask: ('veco', 'damo/nlp_veco_fill-mask_large') } diff --git a/modelscope/pipelines/nlp/fill_mask_pipeline.py b/modelscope/pipelines/nlp/fill_mask_pipeline.py index ac22cf3b..aebf4e09 100644 --- a/modelscope/pipelines/nlp/fill_mask_pipeline.py +++ b/modelscope/pipelines/nlp/fill_mask_pipeline.py @@ -10,6 +10,7 @@ __all__ = ['FillMaskPipeline'] @PIPELINES.register_module(Tasks.fill_mask, module_name=r'sbert') +@PIPELINES.register_module(Tasks.fill_mask, module_name=r'veco') class FillMaskPipeline(Pipeline): def __init__(self, model: MaskedLanguageModel, @@ -36,22 +37,28 @@ class FillMaskPipeline(Pipeline): Dict[str, str]: the prediction results """ import numpy as np - logits = inputs["logits"].detach().numpy() - input_ids = inputs["input_ids"].detach().numpy() + logits = inputs['logits'].detach().numpy() + input_ids = inputs['input_ids'].detach().numpy() pred_ids = np.argmax(logits, axis=-1) - rst_ids = np.where(input_ids==self.mask_id[self.model.config.model_type], pred_ids, input_ids) + rst_ids = np.where( + input_ids == self.mask_id[self.model.config.model_type], pred_ids, + input_ids) pred_strings = 
[] for ids in rst_ids: if self.model.config.model_type == 'veco': - pred_string = self.tokenizer.decode(ids).split('')[0].replace("", "").replace("", "").replace("", "") - elif self.model.config.vocab_size == 21128: # zh bert + pred_string = self.tokenizer.decode(ids).split( + '')[0].replace('', + '').replace('', + '').replace('', '') + elif self.model.config.vocab_size == 21128: # zh bert pred_string = self.tokenizer.convert_ids_to_tokens(ids) - pred_string = ''.join(pred_string).replace('##','') - pred_string = pred_string.split('[SEP]')[0].replace('[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') - else: ## en bert + pred_string = ''.join(pred_string).replace('##', '') + pred_string = pred_string.split('[SEP]')[0].replace( + '[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') + else: ## en bert pred_string = self.tokenizer.decode(ids) - pred_string = pred_string.split('[SEP]')[0].replace('[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') + pred_string = pred_string.split('[SEP]')[0].replace( + '[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') pred_strings.append(pred_string) - return {'pred_string': pred_strings} - + return {'pred_string': pred_strings} diff --git a/tests/pipelines/test_fill_mask.py b/tests/pipelines/test_fill_mask.py index 12f7ff3b..8b021c03 100644 --- a/tests/pipelines/test_fill_mask.py +++ b/tests/pipelines/test_fill_mask.py @@ -5,24 +5,41 @@ import unittest from maas_hub.snapshot_download import snapshot_download +from modelscope.models import Model from modelscope.models.nlp import MaskedLanguageModel from modelscope.pipelines import FillMaskPipeline, pipeline from modelscope.preprocessors import FillMaskPreprocessor from modelscope.utils.constant import Tasks -from modelscope.models import Model from modelscope.utils.hub import get_model_cache_dir from modelscope.utils.test_utils import test_level + class FillMaskTest(unittest.TestCase): - model_id_sbert = {'zh': 'damo/nlp_structbert_fill-mask-chinese_large', - 'en': 
'damo/nlp_structbert_fill-mask-english_large'} + model_id_sbert = { + 'zh': 'damo/nlp_structbert_fill-mask-chinese_large', + 'en': 'damo/nlp_structbert_fill-mask-english_large' + } model_id_veco = 'damo/nlp_veco_fill-mask_large' - ori_texts = {"zh": "段誉轻挥折扇,摇了摇头,说道:“你师父是你的师父,你师父可不是我的师父。你师父差得动你,你师父可差不动我。", - "en": "Everything in what you call reality is really just a reflection of your consciousness. Your whole universe is just a mirror reflection of your story."} + ori_texts = { + 'zh': + f'段誉轻挥折扇,摇了摇头,说道:“你师父是你的师父,你师父可不是我的师父。' + f'你师父差得动你,你师父可差不动我。', + 'en': + f'Everything in what you call reality is really just a r' + f'eflection of your consciousness. Your whole universe is' + f'just a mirror reflection of your story.' + } - test_inputs = {"zh": "段誉轻[MASK]折扇,摇了摇[MASK],[MASK]道:“你师父是你的[MASK][MASK],你师父可不是[MASK]的师父。你师父差得动你,你师父可[MASK]不动我。", - "en": "Everything in [MASK] you call reality is really [MASK] a reflection of your [MASK]. Your whole universe is just a mirror [MASK] of your story."} + test_inputs = { + 'zh': + f'段誉轻[MASK]折扇,摇了摇[MASK],[MASK]道:“你师父是你的[MASK][MASK]' + f',你师父可不是[MASK]的师父。你师父差得动你,你师父可[MASK]不动我。', + 'en': + f'Everything in [MASK] you call reality is really [MASK] a ' + f'reflection of your [MASK]. Your whole universe is just a ' + f'mirror [MASK] of your story.' 
+ } #def test_run(self): # # sbert @@ -37,51 +54,52 @@ class FillMaskTest(unittest.TestCase): # ori_text = self.ori_texts[language] # test_input = self.test_inputs[language] # print( - # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: {pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' + # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: ' + # f'{pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' # ) - ## veco - #model_dir = snapshot_download(self.model_id_veco) - #preprocessor = FillMaskPreprocessor( - # model_dir, first_sequence='sentence', second_sequence=None) - #model = MaskedLanguageModel(model_dir) - #pipeline1 = FillMaskPipeline(model, preprocessor) - #pipeline2 = pipeline( - # Tasks.fill_mask, model=model, preprocessor=preprocessor) - #for language in ["zh", "en"]: - # ori_text = self.ori_texts[language] - # test_input = self.test_inputs["zh"].replace("[MASK]", "") - # print( - # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: {pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' - + ## veco + #model_dir = snapshot_download(self.model_id_veco) + #preprocessor = FillMaskPreprocessor( + # model_dir, first_sequence='sentence', second_sequence=None) + #model = MaskedLanguageModel(model_dir) + #pipeline1 = FillMaskPipeline(model, preprocessor) + #pipeline2 = pipeline( + # Tasks.fill_mask, model=model, preprocessor=preprocessor) + #for language in ["zh", "en"]: + # ori_text = self.ori_texts[language] + # test_input = self.test_inputs["zh"].replace("[MASK]", "") + # print( + # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: ' + # f'{pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' def test_run_with_model_from_modelhub(self): - for language in ["zh"]: + for language in ['zh']: print(self.model_id_sbert[language]) model = Model.from_pretrained(self.model_id_sbert[language]) - print("model", model.model_dir) + print('model', model.model_dir) preprocessor = FillMaskPreprocessor( - model.model_dir, 
first_sequence='sentence', second_sequence=None) + model.model_dir, + first_sequence='sentence', + second_sequence=None) pipeline_ins = pipeline( - task=Tasks.fill_mask, model=model, preprocessor=preprocessor) - print(pipeline_ins(self_test_inputs[language])) - + task=Tasks.fill_mask, model=model, preprocessor=preprocessor) + print(pipeline_ins(self.test_inputs[language])) #def test_run_with_model_name(self): - ## veco - #pipeline_ins = pipeline( - # task=Tasks.fill_mask, model=self.model_id_veco) - #for language in ["zh", "en"]: - # input_ = self.test_inputs[language].replace("[MASK]", "") - # print(pipeline_ins(input_)) + ## veco + #pipeline_ins = pipeline( + # task=Tasks.fill_mask, model=self.model_id_veco) + #for language in ["zh", "en"]: + # input_ = self.test_inputs[language].replace("[MASK]", "") + # print(pipeline_ins(input_)) - ## structBert - #for language in ["zh"]: - # pipeline_ins = pipeline( - # task=Tasks.fill_mask, model=self.model_id_sbert[language]) - # print(pipeline_ins(self_test_inputs[language])) + ## structBert + #for language in ["zh"]: + # pipeline_ins = pipeline( + # task=Tasks.fill_mask, model=self.model_id_sbert[language]) + # print(pipeline_ins(self_test_inputs[language])) if __name__ == '__main__': unittest.main() - From 3aa1a70ac844ef2876eb41280d8015db0049d05c Mon Sep 17 00:00:00 2001 From: suluyan Date: Fri, 17 Jun 2022 00:36:25 +0800 Subject: [PATCH 3/7] add tests --- modelscope/models/nlp/__init__.py | 2 +- modelscope/pipelines/nlp/__init__.py | 2 +- .../pipelines/nlp/fill_mask_pipeline.py | 27 ++-- modelscope/pipelines/outputs.py | 6 + modelscope/preprocessors/nlp.py | 18 +-- requirements/nlp.txt | 2 +- tests/pipelines/test_fill_mask.py | 138 +++++++++++------- 7 files changed, 116 insertions(+), 79 deletions(-) diff --git a/modelscope/models/nlp/__init__.py b/modelscope/models/nlp/__init__.py index 2c6c6ba2..5801533b 100644 --- a/modelscope/models/nlp/__init__.py +++ b/modelscope/models/nlp/__init__.py @@ -1,4 +1,4 @@ +from 
.masked_language_model import * # noqa F403 from .sentence_similarity_model import * # noqa F403 from .sequence_classification_model import * # noqa F403 from .text_generation_model import * # noqa F403 -from .masked_language_model import * # noqa F403 diff --git a/modelscope/pipelines/nlp/__init__.py b/modelscope/pipelines/nlp/__init__.py index 3151b138..cf2f1c8b 100644 --- a/modelscope/pipelines/nlp/__init__.py +++ b/modelscope/pipelines/nlp/__init__.py @@ -1,4 +1,4 @@ +from .fill_mask_pipeline import * # noqa F403 from .sentence_similarity_pipeline import * # noqa F403 from .sequence_classification_pipeline import * # noqa F403 from .text_generation_pipeline import * # noqa F403 -from .fill_mask_pipeline import * # noqa F403 diff --git a/modelscope/pipelines/nlp/fill_mask_pipeline.py b/modelscope/pipelines/nlp/fill_mask_pipeline.py index aebf4e09..14b1d317 100644 --- a/modelscope/pipelines/nlp/fill_mask_pipeline.py +++ b/modelscope/pipelines/nlp/fill_mask_pipeline.py @@ -1,5 +1,6 @@ -from typing import Dict +from typing import Dict, Optional +from modelscope.models import Model from modelscope.models.nlp import MaskedLanguageModel from modelscope.preprocessors import FillMaskPreprocessor from modelscope.utils.constant import Tasks @@ -13,15 +14,23 @@ __all__ = ['FillMaskPipeline'] @PIPELINES.register_module(Tasks.fill_mask, module_name=r'veco') class FillMaskPipeline(Pipeline): - def __init__(self, model: MaskedLanguageModel, - preprocessor: FillMaskPreprocessor, **kwargs): - """use `model` and `preprocessor` to create a nlp text classification pipeline for prediction + def __init__(self, + model: MaskedLanguageModel, + preprocessor: Optional[FillMaskPreprocessor] = None, + **kwargs): + """use `model` and `preprocessor` to create a nlp fill mask pipeline for prediction Args: - model (SequenceClassificationModel): a model instance - preprocessor (SequenceClassificationPreprocessor): a preprocessor instance + model (MaskedLanguageModel): a model instance + 
preprocessor (FillMaskPreprocessor): a preprocessor instance """ - + sc_model = model if isinstance( + model, MaskedLanguageModel) else Model.from_pretrained(model) + if preprocessor is None: + preprocessor = FillMaskPreprocessor( + sc_model.model_dir, + first_sequence='sentence', + second_sequence=None) super().__init__(model=model, preprocessor=preprocessor, **kwargs) self.preprocessor = preprocessor self.tokenizer = preprocessor.tokenizer @@ -55,10 +64,10 @@ class FillMaskPipeline(Pipeline): pred_string = ''.join(pred_string).replace('##', '') pred_string = pred_string.split('[SEP]')[0].replace( '[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') - else: ## en bert + else: # en bert pred_string = self.tokenizer.decode(ids) pred_string = pred_string.split('[SEP]')[0].replace( '[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') pred_strings.append(pred_string) - return {'pred_string': pred_strings} + return {'text': pred_strings} diff --git a/modelscope/pipelines/outputs.py b/modelscope/pipelines/outputs.py index 1389abd3..b545d6eb 100644 --- a/modelscope/pipelines/outputs.py +++ b/modelscope/pipelines/outputs.py @@ -69,6 +69,12 @@ TASK_OUTPUTS = { # } Tasks.text_generation: ['text'], + # fill mask result for single sample + # { + # "text": "this is the text which masks filled by model." 
+ # } + Tasks.fill_mask: ['text'], + # ============ audio tasks =================== # ============ multi-modal tasks =================== diff --git a/modelscope/preprocessors/nlp.py b/modelscope/preprocessors/nlp.py index 952e7b63..20c4877b 100644 --- a/modelscope/preprocessors/nlp.py +++ b/modelscope/preprocessors/nlp.py @@ -12,8 +12,7 @@ from .builder import PREPROCESSORS __all__ = [ 'Tokenize', 'SequenceClassificationPreprocessor', - 'TextGenerationPreprocessor', - 'FillMaskPreprocessor' + 'TextGenerationPreprocessor', 'FillMaskPreprocessor' ] @@ -173,8 +172,7 @@ class TextGenerationPreprocessor(Preprocessor): return {k: torch.tensor(v) for k, v in rst.items()} -@PREPROCESSORS.register_module( - Fields.nlp, module_name=r'sbert') +@PREPROCESSORS.register_module(Fields.nlp, module_name=r'sbert') class FillMaskPreprocessor(Preprocessor): def __init__(self, model_dir: str, *args, **kwargs): @@ -190,7 +188,8 @@ class FillMaskPreprocessor(Preprocessor): 'first_sequence') self.sequence_length = kwargs.pop('sequence_length', 128) - self.tokenizer = AutoTokenizer.from_pretrained(model_dir) + self.tokenizer = AutoTokenizer.from_pretrained( + model_dir, use_fast=False) @type_assert(object, str) def __call__(self, data: str) -> Dict[str, Any]: @@ -205,15 +204,11 @@ class FillMaskPreprocessor(Preprocessor): Dict[str, Any]: the preprocessed data """ import torch - + new_data = {self.first_sequence: data} # preprocess the data for the model input - rst = { - 'input_ids': [], - 'attention_mask': [], - 'token_type_ids': [] - } + rst = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []} max_seq_length = self.sequence_length @@ -230,4 +225,3 @@ class FillMaskPreprocessor(Preprocessor): rst['token_type_ids'].append(feature['token_type_ids']) return {k: torch.tensor(v) for k, v in rst.items()} - diff --git a/requirements/nlp.txt b/requirements/nlp.txt index 8de83798..261b9ec5 100644 --- a/requirements/nlp.txt +++ b/requirements/nlp.txt @@ -1 +1 @@ 
-https://alinlp.alibaba-inc.com/pypi/sofa-1.0.1.3-py3-none-any.whl +https://alinlp.alibaba-inc.com/pypi/sofa-1.0.3-py3-none-any.whl diff --git a/tests/pipelines/test_fill_mask.py b/tests/pipelines/test_fill_mask.py index 8b021c03..b9e0defc 100644 --- a/tests/pipelines/test_fill_mask.py +++ b/tests/pipelines/test_fill_mask.py @@ -23,82 +23,110 @@ class FillMaskTest(unittest.TestCase): ori_texts = { 'zh': - f'段誉轻挥折扇,摇了摇头,说道:“你师父是你的师父,你师父可不是我的师父。' - f'你师父差得动你,你师父可差不动我。', + '段誉轻挥折扇,摇了摇头,说道:“你师父是你的师父,你师父可不是我的师父。' + '你师父差得动你,你师父可差不动我。', 'en': - f'Everything in what you call reality is really just a r' - f'eflection of your consciousness. Your whole universe is' - f'just a mirror reflection of your story.' + 'Everything in what you call reality is really just a reflection of your ' + 'consciousness. Your whole universe is just a mirror reflection of your story.' } test_inputs = { 'zh': - f'段誉轻[MASK]折扇,摇了摇[MASK],[MASK]道:“你师父是你的[MASK][MASK]' - f',你师父可不是[MASK]的师父。你师父差得动你,你师父可[MASK]不动我。', + '段誉轻[MASK]折扇,摇了摇[MASK],[MASK]道:“你师父是你的[MASK][MASK],你' + '师父可不是[MASK]的师父。你师父差得动你,你师父可[MASK]不动我。', 'en': - f'Everything in [MASK] you call reality is really [MASK] a ' - f'reflection of your [MASK]. Your whole universe is just a ' - f'mirror [MASK] of your story.' + 'Everything in [MASK] you call reality is really [MASK] a reflection of your ' + '[MASK]. Your [MASK] universe is just a mirror [MASK] of your story.' 
} - #def test_run(self): - # # sbert - # for language in ["zh", "en"]: - # model_dir = snapshot_download(self.model_id_sbert[language]) - # preprocessor = FillMaskPreprocessor( - # model_dir, first_sequence='sentence', second_sequence=None) - # model = MaskedLanguageModel(model_dir) - # pipeline1 = FillMaskPipeline(model, preprocessor) - # pipeline2 = pipeline( - # Tasks.fill_mask, model=model, preprocessor=preprocessor) - # ori_text = self.ori_texts[language] - # test_input = self.test_inputs[language] - # print( - # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: ' - # f'{pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}' - # ) + @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + def test_run_by_direct_model_download(self): + # sbert + for language in ['zh', 'en']: + model_dir = snapshot_download(self.model_id_sbert[language]) + preprocessor = FillMaskPreprocessor( + model_dir, first_sequence='sentence', second_sequence=None) + model = MaskedLanguageModel(model_dir) + pipeline1 = FillMaskPipeline(model, preprocessor) + pipeline2 = pipeline( + Tasks.fill_mask, model=model, preprocessor=preprocessor) + ori_text = self.ori_texts[language] + test_input = self.test_inputs[language] + print( + f'\nori_text: {ori_text}\ninput: {test_input}\npipeline1: ' + f'{pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}\n' + ) - ## veco - #model_dir = snapshot_download(self.model_id_veco) - #preprocessor = FillMaskPreprocessor( - # model_dir, first_sequence='sentence', second_sequence=None) - #model = MaskedLanguageModel(model_dir) - #pipeline1 = FillMaskPipeline(model, preprocessor) - #pipeline2 = pipeline( - # Tasks.fill_mask, model=model, preprocessor=preprocessor) - #for language in ["zh", "en"]: - # ori_text = self.ori_texts[language] - # test_input = self.test_inputs["zh"].replace("[MASK]", "") - # print( - # f'ori_text: {ori_text}\ninput: {test_input}\npipeline1: ' - # f'{pipeline1(test_input)}\npipeline2: 
{pipeline2(test_input)}' + # veco + model_dir = snapshot_download(self.model_id_veco) + preprocessor = FillMaskPreprocessor( + model_dir, first_sequence='sentence', second_sequence=None) + model = MaskedLanguageModel(model_dir) + pipeline1 = FillMaskPipeline(model, preprocessor) + pipeline2 = pipeline( + Tasks.fill_mask, model=model, preprocessor=preprocessor) + for language in ['zh', 'en']: + ori_text = self.ori_texts[language] + test_input = self.test_inputs[language].replace('[MASK]', '') + print( + f'\nori_text: {ori_text}\ninput: {test_input}\npipeline1: ' + f'{pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}\n' + ) + @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') def test_run_with_model_from_modelhub(self): - for language in ['zh']: + # sbert + for language in ['zh', 'en']: print(self.model_id_sbert[language]) model = Model.from_pretrained(self.model_id_sbert[language]) - print('model', model.model_dir) preprocessor = FillMaskPreprocessor( model.model_dir, first_sequence='sentence', second_sequence=None) pipeline_ins = pipeline( task=Tasks.fill_mask, model=model, preprocessor=preprocessor) - print(pipeline_ins(self.test_inputs[language])) + print( + f'\nori_text: {self.ori_texts[language]}\ninput: {self.test_inputs[language]}\npipeline: ' + f'{pipeline_ins(self.test_inputs[language])}\n') - #def test_run_with_model_name(self): - ## veco - #pipeline_ins = pipeline( - # task=Tasks.fill_mask, model=self.model_id_veco) - #for language in ["zh", "en"]: - # input_ = self.test_inputs[language].replace("[MASK]", "") - # print(pipeline_ins(input_)) + # veco + model = Model.from_pretrained(self.model_id_veco) + preprocessor = FillMaskPreprocessor( + model.model_dir, first_sequence='sentence', second_sequence=None) + pipeline_ins = pipeline( + Tasks.fill_mask, model=model, preprocessor=preprocessor) + for language in ['zh', 'en']: + ori_text = self.ori_texts[language] + test_input = self.test_inputs[language].replace('[MASK]', '') + 
print(f'\nori_text: {ori_text}\ninput: {test_input}\npipeline: ' + f'{pipeline_ins(test_input)}\n') - ## structBert - #for language in ["zh"]: - # pipeline_ins = pipeline( - # task=Tasks.fill_mask, model=self.model_id_sbert[language]) - # print(pipeline_ins(self_test_inputs[language])) + @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + def test_run_with_model_name(self): + # veco + pipeline_ins = pipeline(task=Tasks.fill_mask, model=self.model_id_veco) + for language in ['zh', 'en']: + ori_text = self.ori_texts[language] + test_input = self.test_inputs[language].replace('[MASK]', '') + print(f'\nori_text: {ori_text}\ninput: {test_input}\npipeline: ' + f'{pipeline_ins(test_input)}\n') + + # structBert + language = 'zh' + pipeline_ins = pipeline( + task=Tasks.fill_mask, model=self.model_id_sbert[language]) + print( + f'\nori_text: {self.ori_texts[language]}\ninput: {self.test_inputs[language]}\npipeline: ' + f'{pipeline_ins(self.test_inputs[language])}\n') + + @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + def test_run_with_default_model(self): + pipeline_ins = pipeline(task=Tasks.fill_mask) + language = 'en' + ori_text = self.ori_texts[language] + test_input = self.test_inputs[language].replace('[MASK]', '') + print(f'\nori_text: {ori_text}\ninput: {test_input}\npipeline: ' + f'{pipeline_ins(test_input)}\n') if __name__ == '__main__': From bb5a40ad8c40439c4b0e940ebf318dcd50d9ffc9 Mon Sep 17 00:00:00 2001 From: suluyan Date: Fri, 17 Jun 2022 11:54:42 +0800 Subject: [PATCH 4/7] test level >= 0 --- tests/pipelines/test_fill_mask.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/test_fill_mask.py b/tests/pipelines/test_fill_mask.py index b9e0defc..293608e0 100644 --- a/tests/pipelines/test_fill_mask.py +++ b/tests/pipelines/test_fill_mask.py @@ -73,7 +73,7 @@ class FillMaskTest(unittest.TestCase): f'{pipeline1(test_input)}\npipeline2: {pipeline2(test_input)}\n' ) - 
@unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_run_with_model_from_modelhub(self): # sbert for language in ['zh', 'en']: @@ -101,7 +101,7 @@ class FillMaskTest(unittest.TestCase): print(f'\nori_text: {ori_text}\ninput: {test_input}\npipeline: ' f'{pipeline_ins(test_input)}\n') - @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_run_with_model_name(self): # veco pipeline_ins = pipeline(task=Tasks.fill_mask, model=self.model_id_veco) @@ -119,7 +119,7 @@ class FillMaskTest(unittest.TestCase): f'\nori_text: {self.ori_texts[language]}\ninput: {self.test_inputs[language]}\npipeline: ' f'{pipeline_ins(self.test_inputs[language])}\n') - @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_run_with_default_model(self): pipeline_ins = pipeline(task=Tasks.fill_mask) language = 'en' From c7a19c9c1f38442523251ccab4981b44669d05b5 Mon Sep 17 00:00:00 2001 From: suluyan Date: Mon, 20 Jun 2022 16:17:31 +0800 Subject: [PATCH 5/7] fix comments: rename and refactor AliceMindMLM; adjust pipeline --- .../models/nlp/masked_language_model.py | 24 ++++--- .../pipelines/nlp/fill_mask_pipeline.py | 63 ++++++++++++------- tests/pipelines/test_fill_mask.py | 8 +-- 3 files changed, 59 insertions(+), 36 deletions(-) diff --git a/modelscope/models/nlp/masked_language_model.py b/modelscope/models/nlp/masked_language_model.py index cb12a4dd..848d7484 100644 --- a/modelscope/models/nlp/masked_language_model.py +++ b/modelscope/models/nlp/masked_language_model.py @@ -6,12 +6,12 @@ from ...utils.constant import Tasks from ..base import Model, Tensor from ..builder import MODELS -__all__ = ['MaskedLanguageModel'] +__all__ = [ + 'StructBertForMaskedLM', 'VecoForMaskedLM', 
'AliceMindBaseForMaskedLM' +] -@MODELS.register_module(Tasks.fill_mask, module_name=r'sbert') -@MODELS.register_module(Tasks.fill_mask, module_name=r'veco') -class MaskedLanguageModel(Model): +class AliceMindBaseForMaskedLM(Model): def __init__(self, model_dir: str, *args, **kwargs): from sofa.utils.backend import AutoConfig, AutoModelForMaskedLM @@ -30,15 +30,19 @@ class MaskedLanguageModel(Model): Returns: Dict[str, np.ndarray]: results - Example: - { - 'predictions': array([1]), # lable 0-negative 1-positive - 'probabilities': array([[0.11491239, 0.8850876 ]], dtype=float32), - 'logits': array([[-0.53860897, 1.5029076 ]], dtype=float32) # true value - } """ rst = self.model( input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids']) return {'logits': rst['logits'], 'input_ids': inputs['input_ids']} + + +@MODELS.register_module(Tasks.fill_mask, module_name=r'sbert') +class StructBertForMaskedLM(AliceMindBaseForMaskedLM): + pass + + +@MODELS.register_module(Tasks.fill_mask, module_name=r'veco') +class VecoForMaskedLM(AliceMindBaseForMaskedLM): + pass diff --git a/modelscope/pipelines/nlp/fill_mask_pipeline.py b/modelscope/pipelines/nlp/fill_mask_pipeline.py index 14b1d317..abe5b5b5 100644 --- a/modelscope/pipelines/nlp/fill_mask_pipeline.py +++ b/modelscope/pipelines/nlp/fill_mask_pipeline.py @@ -1,7 +1,7 @@ from typing import Dict, Optional from modelscope.models import Model -from modelscope.models.nlp import MaskedLanguageModel +from modelscope.models.nlp import AliceMindBaseForMaskedLM from modelscope.preprocessors import FillMaskPreprocessor from modelscope.utils.constant import Tasks from ..base import Pipeline, Tensor @@ -15,20 +15,20 @@ __all__ = ['FillMaskPipeline'] class FillMaskPipeline(Pipeline): def __init__(self, - model: MaskedLanguageModel, + model: AliceMindBaseForMaskedLM, preprocessor: Optional[FillMaskPreprocessor] = None, **kwargs): """use `model` and `preprocessor` to create a nlp fill 
mask pipeline for prediction Args: - model (MaskedLanguageModel): a model instance + model (AliceMindBaseForMaskedLM): a model instance preprocessor (FillMaskPreprocessor): a preprocessor instance """ - sc_model = model if isinstance( - model, MaskedLanguageModel) else Model.from_pretrained(model) + fill_mask_model = model if isinstance( + model, AliceMindBaseForMaskedLM) else Model.from_pretrained(model) if preprocessor is None: preprocessor = FillMaskPreprocessor( - sc_model.model_dir, + fill_mask_model.model_dir, first_sequence='sentence', second_sequence=None) super().__init__(model=model, preprocessor=preprocessor, **kwargs) @@ -36,6 +36,27 @@ class FillMaskPipeline(Pipeline): self.tokenizer = preprocessor.tokenizer self.mask_id = {'veco': 250001, 'sbert': 103} + self.rep_map = { + 'sbert': { + '[unused0]': '', + '[PAD]': '', + '[unused1]': '', + r' +': ' ', + '[SEP]': '', + '[unused2]': '', + '[CLS]': '', + '[UNK]': '' + }, + 'veco': { + r' +': ' ', + '<unk>': '', + '<s>': '', + '</s>': '', + '<pad>': '', + '<mask>': ' ' + } + } + def postprocess(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]: """process the prediction results @@ -49,25 +70,23 @@ class FillMaskPipeline(Pipeline): logits = inputs['logits'].detach().numpy() input_ids = inputs['input_ids'].detach().numpy() pred_ids = np.argmax(logits, axis=-1) - rst_ids = np.where( - input_ids == self.mask_id[self.model.config.model_type], pred_ids, - input_ids) + model_type = self.model.config.model_type + rst_ids = np.where(input_ids == self.mask_id[model_type], pred_ids, + input_ids) + + def rep_tokens(string, rep_map): + for k, v in rep_map.items(): + string = string.replace(k, v) + return string.strip() + pred_strings = [] - for ids in rst_ids: - if self.model.config.model_type == 'veco': - pred_string = self.tokenizer.decode(ids).split( - '</s>')[0].replace('<s>', - '').replace('<pad>', - '').replace('<unk>', '') - elif self.model.config.vocab_size == 21128: # zh bert + for ids in rst_ids: # batch + if self.model.config.vocab_size == 
21128: # zh bert pred_string = self.tokenizer.convert_ids_to_tokens(ids) - pred_string = ''.join(pred_string).replace('##', '') - pred_string = pred_string.split('[SEP]')[0].replace( - '[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') - else: # en bert + pred_string = ''.join(pred_string) + else: pred_string = self.tokenizer.decode(ids) - pred_string = pred_string.split('[SEP]')[0].replace( - '[CLS]', '').replace('[SEP]', '').replace('[UNK]', '') + pred_string = rep_tokens(pred_string, self.rep_map[model_type]) pred_strings.append(pred_string) return {'text': pred_strings} diff --git a/tests/pipelines/test_fill_mask.py b/tests/pipelines/test_fill_mask.py index 293608e0..a4d53403 100644 --- a/tests/pipelines/test_fill_mask.py +++ b/tests/pipelines/test_fill_mask.py @@ -6,7 +6,7 @@ import unittest from maas_hub.snapshot_download import snapshot_download from modelscope.models import Model -from modelscope.models.nlp import MaskedLanguageModel +from modelscope.models.nlp import StructBertForMaskedLM, VecoForMaskedLM from modelscope.pipelines import FillMaskPipeline, pipeline from modelscope.preprocessors import FillMaskPreprocessor from modelscope.utils.constant import Tasks @@ -39,14 +39,14 @@ class FillMaskTest(unittest.TestCase): '[MASK]. Your [MASK] universe is just a mirror [MASK] of your story.' 
} - @unittest.skipUnless(test_level() >= 2, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') def test_run_by_direct_model_download(self): # sbert for language in ['zh', 'en']: model_dir = snapshot_download(self.model_id_sbert[language]) preprocessor = FillMaskPreprocessor( model_dir, first_sequence='sentence', second_sequence=None) - model = MaskedLanguageModel(model_dir) + model = StructBertForMaskedLM(model_dir) pipeline1 = FillMaskPipeline(model, preprocessor) pipeline2 = pipeline( Tasks.fill_mask, model=model, preprocessor=preprocessor) @@ -61,7 +61,7 @@ class FillMaskTest(unittest.TestCase): model_dir = snapshot_download(self.model_id_veco) preprocessor = FillMaskPreprocessor( model_dir, first_sequence='sentence', second_sequence=None) - model = MaskedLanguageModel(model_dir) + model = VecoForMaskedLM(model_dir) pipeline1 = FillMaskPipeline(model, preprocessor) pipeline2 = pipeline( Tasks.fill_mask, model=model, preprocessor=preprocessor) From b049f17f547320c652289bb5b46dedcf8f1e7f6e Mon Sep 17 00:00:00 2001 From: suluyan Date: Tue, 21 Jun 2022 15:52:22 +0800 Subject: [PATCH 6/7] fix comments --- modelscope/models/nlp/masked_language_model.py | 8 +++++--- modelscope/pipelines/nlp/fill_mask_pipeline.py | 7 ++++--- modelscope/preprocessors/nlp.py | 2 +- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/modelscope/models/nlp/masked_language_model.py b/modelscope/models/nlp/masked_language_model.py index 848d7484..514c72c7 100644 --- a/modelscope/models/nlp/masked_language_model.py +++ b/modelscope/models/nlp/masked_language_model.py @@ -6,9 +6,7 @@ from ...utils.constant import Tasks from ..base import Model, Tensor from ..builder import MODELS -__all__ = [ - 'StructBertForMaskedLM', 'VecoForMaskedLM', 'AliceMindBaseForMaskedLM' -] +__all__ = ['StructBertForMaskedLM', 'VecoForMaskedLM'] class AliceMindBaseForMaskedLM(Model): @@ -40,9 +38,13 @@ class AliceMindBaseForMaskedLM(Model): 
@MODELS.register_module(Tasks.fill_mask, module_name=r'sbert') class StructBertForMaskedLM(AliceMindBaseForMaskedLM): + # The StructBert for MaskedLM uses the same underlying model structure + # as the base model class. pass @MODELS.register_module(Tasks.fill_mask, module_name=r'veco') class VecoForMaskedLM(AliceMindBaseForMaskedLM): + # The Veco for MaskedLM uses the same underlying model structure + # as the base model class. pass diff --git a/modelscope/pipelines/nlp/fill_mask_pipeline.py b/modelscope/pipelines/nlp/fill_mask_pipeline.py index abe5b5b5..d7c1d456 100644 --- a/modelscope/pipelines/nlp/fill_mask_pipeline.py +++ b/modelscope/pipelines/nlp/fill_mask_pipeline.py @@ -1,7 +1,8 @@ -from typing import Dict, Optional +from typing import Dict, Optional, Union from modelscope.models import Model -from modelscope.models.nlp import AliceMindBaseForMaskedLM +from modelscope.models.nlp.masked_language_model import \ + AliceMindBaseForMaskedLM from modelscope.preprocessors import FillMaskPreprocessor from modelscope.utils.constant import Tasks from ..base import Pipeline, Tensor @@ -15,7 +16,7 @@ __all__ = ['FillMaskPipeline'] class FillMaskPipeline(Pipeline): def __init__(self, - model: AliceMindBaseForMaskedLM, + model: Union[AliceMindBaseForMaskedLM, str], preprocessor: Optional[FillMaskPreprocessor] = None, **kwargs): """use `model` and `preprocessor` to create a nlp fill mask pipeline for prediction diff --git a/modelscope/preprocessors/nlp.py b/modelscope/preprocessors/nlp.py index c2f72292..fc910e98 100644 --- a/modelscope/preprocessors/nlp.py +++ b/modelscope/preprocessors/nlp.py @@ -180,7 +180,7 @@ class TextGenerationPreprocessor(Preprocessor): return {k: torch.tensor(v) for k, v in rst.items()} -@PREPROCESSORS.register_module(Fields.nlp, module_name=r'sbert') +@PREPROCESSORS.register_module(Fields.nlp) class FillMaskPreprocessor(Preprocessor): def __init__(self, model_dir: str, *args, **kwargs): From 62c2877b608b15a9e76883d726fe26972de004e3 Mon Sep 17 
00:00:00 2001 From: suluyan Date: Tue, 21 Jun 2022 16:30:22 +0800 Subject: [PATCH 7/7] fix isort for pre-commit check --- modelscope/models/nlp/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modelscope/models/nlp/__init__.py b/modelscope/models/nlp/__init__.py index 801832ad..6be4493b 100644 --- a/modelscope/models/nlp/__init__.py +++ b/modelscope/models/nlp/__init__.py @@ -1,5 +1,5 @@ -from .masked_language_model import * # noqa F403 from .bert_for_sequence_classification import * # noqa F403 +from .masked_language_model import * # noqa F403 from .palm_for_text_generation import * # noqa F403 from .sbert_for_sentence_similarity import * # noqa F403 from .sbert_for_token_classification import * # noqa F403