Merge remote-tracking branch 'origin/master' into ofa/finetune
@@ -14,6 +14,8 @@ from modelscope.utils.constant import Tasks
 
 @HEADS.register_module(
     Tasks.token_classification, module_name=Heads.token_classification)
+@HEADS.register_module(
+    Tasks.part_of_speech, module_name=Heads.token_classification)
 class TokenClassificationHead(TorchHead):
 
     def __init__(self, **kwargs):
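Stacking a second register_module decorator binds the same head class under two task keys, so both token_classification and part_of_speech resolve to TokenClassificationHead. The sketch below is a toy registry written for illustration only (ToyRegistry and its string keys are invented here, not ModelScope's actual Registry implementation), showing how such double registration can behave.

# Toy illustration of double registration; ModelScope's real Registry differs.
from typing import Callable, Dict, Tuple, Type


class ToyRegistry:
    """Maps (task, module_name) keys to registered classes."""

    def __init__(self) -> None:
        self._modules: Dict[Tuple[str, str], Type] = {}

    def register_module(self, task: str, module_name: str) -> Callable[[Type], Type]:
        def decorator(cls: Type) -> Type:
            # The same class may be registered under several task keys.
            self._modules[(task, module_name)] = cls
            return cls

        return decorator

    def get(self, task: str, module_name: str) -> Type:
        return self._modules[(task, module_name)]


HEADS = ToyRegistry()


@HEADS.register_module('token-classification', module_name='token-classification')
@HEADS.register_module('part-of-speech', module_name='token-classification')
class TokenClassificationHead:
    pass


# Both task keys resolve to the same head class.
assert HEADS.get('part-of-speech', 'token-classification') is TokenClassificationHead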
@@ -19,6 +19,8 @@ __all__ = ['TokenClassificationModel']
 
 @MODELS.register_module(
     Tasks.token_classification, module_name=TaskModels.token_classification)
+@MODELS.register_module(
+    Tasks.part_of_speech, module_name=TaskModels.token_classification)
 class TokenClassificationModel(SingleBackboneTaskModelBase):
 
     def __init__(self, model_dir: str, *args, **kwargs):
@@ -25,6 +25,8 @@ DEFAULT_MODEL_FOR_PIPELINE = {
     Tasks.word_segmentation:
     (Pipelines.word_segmentation,
      'damo/nlp_structbert_word-segmentation_chinese-base'),
+    Tasks.part_of_speech: (Pipelines.part_of_speech,
+                           'damo/nlp_structbert_part-of-speech_chinese-base'),
     Tasks.token_classification:
     (Pipelines.part_of_speech,
      'damo/nlp_structbert_part-of-speech_chinese-base'),
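Adding Tasks.part_of_speech to DEFAULT_MODEL_FOR_PIPELINE is what lets pipeline(task=Tasks.part_of_speech) run without an explicit model, as the updated test_run_with_default_model below exercises. A minimal sketch of that lookup, assuming a dict shaped like the one in this hunk; the resolve_default helper and the plain string keys are hypothetical, not ModelScope API.

# Hypothetical helper illustrating the default-model lookup; not ModelScope's code.
from typing import Dict, Tuple

DEFAULT_MODEL_FOR_PIPELINE: Dict[str, Tuple[str, str]] = {
    'word-segmentation': ('word-segmentation',
                          'damo/nlp_structbert_word-segmentation_chinese-base'),
    'part-of-speech': ('part-of-speech',
                       'damo/nlp_structbert_part-of-speech_chinese-base'),
}


def resolve_default(task: str) -> Tuple[str, str]:
    """Return the (pipeline_name, model_id) registered as the default for a task."""
    try:
        return DEFAULT_MODEL_FOR_PIPELINE[task]
    except KeyError:
        raise ValueError(f'No default model configured for task {task!r}') from None


# pipeline(task='part-of-speech') with no model would fall back to this entry.
print(resolve_default('part-of-speech'))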
@@ -18,6 +18,8 @@ __all__ = ['TokenClassificationPipeline']
 
 @PIPELINES.register_module(
     Tasks.token_classification, module_name=Pipelines.part_of_speech)
+@PIPELINES.register_module(
+    Tasks.part_of_speech, module_name=Pipelines.part_of_speech)
 class TokenClassificationPipeline(Pipeline):
 
     def __init__(self,
@@ -13,7 +13,7 @@ from modelscope.utils.test_utils import test_level
 
 
 class PartOfSpeechTest(unittest.TestCase):
-    model_id = 'damo/nlp_structbert_part-of-speech_chinese-base'
+    model_id = 'damo/nlp_structbert_part-of-speech_chinese-lite'
     sentence = '今天天气不错,适合出去游玩'
 
     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
@@ -34,20 +34,17 @@ class PartOfSpeechTest(unittest.TestCase):
         model = Model.from_pretrained(self.model_id)
         tokenizer = TokenClassificationPreprocessor(model.model_dir)
         pipeline_ins = pipeline(
-            task=Tasks.token_classification,
-            model=model,
-            preprocessor=tokenizer)
+            task=Tasks.part_of_speech, model=model, preprocessor=tokenizer)
         print(pipeline_ins(input=self.sentence))
 
     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_run_with_model_name(self):
-        pipeline_ins = pipeline(
-            task=Tasks.token_classification, model=self.model_id)
+        pipeline_ins = pipeline(task=Tasks.part_of_speech, model=self.model_id)
         print(pipeline_ins(input=self.sentence))
 
     @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
     def test_run_with_default_model(self):
-        pipeline_ins = pipeline(task=Tasks.token_classification)
+        pipeline_ins = pipeline(task=Tasks.part_of_speech)
         print(pipeline_ins(input=self.sentence))
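Taken together, the registrations above make the part-of-speech task reachable through the standard pipeline entry points. Below is a consolidated usage sketch assembled from the test cases in this commit; the import paths are assumed to match the test module, and the output format depends on the model.

# Usage sketch based on the tests above; import paths assumed from the test module.
from modelscope.models import Model
from modelscope.pipelines import pipeline
from modelscope.preprocessors import TokenClassificationPreprocessor
from modelscope.utils.constant import Tasks

sentence = '今天天气不错,适合出去游玩'

# 1. Default model: resolved through DEFAULT_MODEL_FOR_PIPELINE.
pos = pipeline(task=Tasks.part_of_speech)
print(pos(input=sentence))

# 2. Explicit model id.
pos = pipeline(task=Tasks.part_of_speech,
               model='damo/nlp_structbert_part-of-speech_chinese-lite')
print(pos(input=sentence))

# 3. Pre-loaded model plus preprocessor.
model = Model.from_pretrained('damo/nlp_structbert_part-of-speech_chinese-lite')
tokenizer = TokenClassificationPreprocessor(model.model_dir)
pos = pipeline(task=Tasks.part_of_speech, model=model, preprocessor=tokenizer)
print(pos(input=sentence))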