mirror of
https://github.com/modelscope/modelscope.git
synced 2025-12-23 11:39:26 +01:00
1.新增支持原始bert模型(非easynlp的 backbone prefix版本)
2.支持bert的在sequence classification/fill mask /token classification上的backbone head形式
3.统一了sequence classification几个任务的pipeline到一个类
4.fill mask 支持backbone head形式
5.token classification的几个子任务(ner,word seg, part of speech)的preprocessor 统一到了一起TokenClassificationPreprocessor
6. sequence classification的几个子任务(single classification, pair classification)的preprocessor 统一到了一起SequenceClassificationPreprocessor
7. 改动register中 cls的group_key 赋值位置,之前的group_key在多个decorators的情况下,会被覆盖,obj_cls的group_key信息不正确
8. 基于backbone head形式将 原本group_key和 module同名的情况尝试做调整,如下在modelscope/pipelines/nlp/sequence_classification_pipeline.py 中
原本
@PIPELINES.register_module(
Tasks.sentiment_classification, module_name=Pipelines.sentiment_classification)
改成
@PIPELINES.register_module(
Tasks.text_classification, module_name=Pipelines.sentiment_classification)
相应的configuration.json也有改动,这样的改动更符合任务和pipeline(子任务)的关系。
9. 其他相应改动为支持上述功能
Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10041463
101 lines
3.7 KiB
Python
# Copyright (c) Alibaba, Inc. and its affiliates.
|
|
import unittest
|
|
|
|
from modelscope.models import Model
|
|
from modelscope.msdatasets import MsDataset
|
|
from modelscope.pipelines import pipeline
|
|
from modelscope.pipelines.nlp import SequenceClassificationPipeline
|
|
from modelscope.preprocessors import SequenceClassificationPreprocessor
|
|
from modelscope.utils.constant import Tasks
|
|
from modelscope.utils.demo_utils import DemoCompatibilityCheck
|
|
from modelscope.utils.test_utils import test_level
|
|
|
|
|
|
class SequenceClassificationTest(unittest.TestCase, DemoCompatibilityCheck):
    """Tests for the text-classification (sequence classification) pipeline.

    All pipeline-execution tests are currently skipped ("nlp model does not
    support tensor input"); they document the intended usage patterns for
    building the pipeline from a model instance, a model id, or the task's
    default model.
    """

    sentence1 = 'i like this wonderful place'

    def setUp(self) -> None:
        # Model id and task used when constructing pipelines in the tests below.
        self.model_id = 'damo/bert-base-sst2'
        self.task = Tasks.text_classification

    def predict(self, pipeline_ins: SequenceClassificationPipeline):
        """Run ``pipeline_ins`` on a few SST-2 test sentences and print results."""
        # Local import: easynlp is an optional dependency needed only here.
        from easynlp.appzoo import load_dataset

        # Renamed from `set` to avoid shadowing the builtin.
        dataset = load_dataset('glue', 'sst2')
        data = dataset['test']['sentence'][:3]

        results = pipeline_ins(data[0])
        print(results)
        results = pipeline_ins(data[1])
        print(results)

        print(data)

    def printDataset(self, dataset: MsDataset):
        """Print at most the first 11 records of ``dataset``."""
        for i, r in enumerate(dataset):
            if i > 10:
                break
            print(r)

    # @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    @unittest.skip('nlp model does not support tensor input, skipped')
    def test_run_with_model_from_modelhub(self):
        """Build a pipeline from a pre-loaded model + preprocessor and run it."""
        model = Model.from_pretrained(self.model_id)
        preprocessor = SequenceClassificationPreprocessor(
            model.model_dir, first_sequence='sentence', second_sequence=None)
        pipeline_ins = pipeline(
            task=Tasks.text_classification,
            model=model,
            preprocessor=preprocessor)
        print(f'sentence1: {self.sentence1}\n'
              f'pipeline1:{pipeline_ins(input=self.sentence1)}')

    # @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    @unittest.skip('nlp model does not support tensor input, skipped')
    def test_run_with_model_name(self):
        """Build a pipeline from a model id and run it on a ModelScope dataset."""
        text_classification = pipeline(
            task=Tasks.text_classification, model=self.model_id)
        result = text_classification(
            MsDataset.load(
                'xcopa',
                subset_name='translation-et',
                namespace='damotest',
                split='test',
                target='premise'))
        self.printDataset(result)

    # @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    @unittest.skip('nlp model does not support tensor input, skipped')
    def test_run_with_default_model(self):
        """Build a pipeline with the task's default model and run it."""
        text_classification = pipeline(task=Tasks.text_classification)
        result = text_classification(
            MsDataset.load(
                'xcopa',
                subset_name='translation-et',
                namespace='damotest',
                split='test',
                target='premise'))
        self.printDataset(result)

    # @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    @unittest.skip('nlp model does not support tensor input, skipped')
    def test_run_with_modelscope_dataset(self):
        """Feed a pre-loaded ModelScope dataset to the default pipeline."""
        text_classification = pipeline(task=Tasks.text_classification)
        # loaded from modelscope dataset
        dataset = MsDataset.load(
            'xcopa',
            subset_name='translation-et',
            namespace='damotest',
            split='test',
            target='premise')
        result = text_classification(dataset)
        self.printDataset(result)

    @unittest.skip('demo compatibility test is only enabled on a needed-basis')
    def test_demo_compatibility(self):
        # Delegates to the DemoCompatibilityCheck mixin.
        self.compatibility_check()
|
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()