mirror of
https://github.com/modelscope/modelscope.git
synced 2026-02-24 20:19:51 +01:00
Merge branch 'release/1.1' of gitlab.alibaba-inc.com:Ali-MaaS/MaaS-lib into release/1.1
This commit is contained in:
15
README.md
15
README.md
@@ -1,3 +1,18 @@
|
||||
|
||||
<div align="center">
|
||||
|
||||
[](https://pypi.org/project/modelscope/)
|
||||
<!-- [](https://easy-cv.readthedocs.io/en/latest/) -->
|
||||
[](https://github.com/modelscope/modelscope/blob/master/LICENSE)
|
||||
[](https://github.com/modelscope/modelscope/issues)
|
||||
[](https://GitHub.com/modelscope/modelscope/pull/)
|
||||
[](https://GitHub.com/modelscope/modelscope/commit/)
|
||||
<!-- [](https://GitHub.com/modelscope/modelscope/graphs/contributors/) -->
|
||||
<!-- [](http://makeapullrequest.com) -->
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
# Introduction
|
||||
|
||||
[ModelScope](https://www.modelscope.cn) is a “Model-as-a-Service” (MaaS) platform that seeks to bring together the most advanced machine learning models from the AI community, and to streamline the process of leveraging AI models in real applications. The core ModelScope library enables developers to perform inference, training and evaluation through rich layers of API designs that facilitate a unified experience across state-of-the-art models from different AI domains.
|
||||
|
||||
@@ -94,7 +94,7 @@ class GitCommandWrapper(metaclass=Singleton):
|
||||
return False
|
||||
|
||||
def git_lfs_install(self, repo_dir):
|
||||
cmd = ['git', '-C', repo_dir, 'lfs', 'install']
|
||||
cmd = ['-C', repo_dir, 'lfs', 'install']
|
||||
try:
|
||||
self._run_git_command(*cmd)
|
||||
return True
|
||||
|
||||
@@ -57,7 +57,7 @@ class T5ForConditionalGeneration(T5PreTrainedModel):
|
||||
r'decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight',
|
||||
]
|
||||
|
||||
def __init__(self, config: T5Config):
|
||||
def __init__(self, config: T5Config, **kwargs):
|
||||
super().__init__(config)
|
||||
self.model_dim = config.d_model
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ class BertForDocumentSegmentation(BertPreTrainedModel):
|
||||
|
||||
_keys_to_ignore_on_load_unexpected = [r'pooler']
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self, config, **kwargs):
|
||||
super().__init__(config)
|
||||
self.num_labels = config.num_labels
|
||||
self.sentence_pooler_type = None
|
||||
|
||||
@@ -11,7 +11,7 @@ from .backbone import BertModel, BertPreTrainedModel
|
||||
@MODELS.register_module(Tasks.sentence_embedding, module_name=Models.bert)
|
||||
class BertForSentenceEmbedding(BertPreTrainedModel):
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self, config, **kwargs):
|
||||
super().__init__(config)
|
||||
self.config = config
|
||||
setattr(self, self.base_model_prefix,
|
||||
|
||||
@@ -66,7 +66,7 @@ class BertForSequenceClassification(BertPreTrainedModel):
|
||||
weights.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self, config, **kwargs):
|
||||
super().__init__(config)
|
||||
self.num_labels = config.num_labels
|
||||
self.config = config
|
||||
|
||||
@@ -25,7 +25,7 @@ __all__ = ['PoNetForDocumentSegmentation']
|
||||
class PoNetForDocumentSegmentation(PoNetPreTrainedModel):
|
||||
_keys_to_ignore_on_load_unexpected = [r'pooler']
|
||||
|
||||
def __init__(self, config):
|
||||
def __init__(self, config, **kwargs):
|
||||
super().__init__(config)
|
||||
self.num_labels = config.num_labels
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ class VideoHumanMattingPipeline(Pipeline):
|
||||
masks = []
|
||||
rec = [None] * 4
|
||||
self.model = self.model.to(self.device)
|
||||
logger.info('matting start using ', self.device)
|
||||
logger.info('matting start using ' + self.device)
|
||||
with torch.no_grad():
|
||||
while True:
|
||||
if frame is None:
|
||||
|
||||
@@ -77,14 +77,14 @@ class TranslationEvaluationPipeline(Pipeline):
|
||||
self.preprocessor.eval_mode = eval_mode
|
||||
return
|
||||
|
||||
def __call__(self, input_dict: Dict[str, Union[str, List[str]]], **kwargs):
|
||||
def __call__(self, input: Dict[str, Union[str, List[str]]], **kwargs):
|
||||
r"""Implementation of __call__ function.
|
||||
|
||||
Args:
|
||||
input_dict: The formatted dict containing the inputted sentences.
|
||||
input: The formatted dict containing the inputted sentences.
|
||||
An example of the formatted dict:
|
||||
```
|
||||
input_dict = {
|
||||
input = {
|
||||
'hyp': [
|
||||
'This is a sentence.',
|
||||
'This is another sentence.',
|
||||
@@ -100,7 +100,7 @@ class TranslationEvaluationPipeline(Pipeline):
|
||||
}
|
||||
```
|
||||
"""
|
||||
return super().__call__(input=input_dict, **kwargs)
|
||||
return super().__call__(input=input, **kwargs)
|
||||
|
||||
def forward(self,
|
||||
input_ids: List[torch.Tensor]) -> Dict[str, torch.Tensor]:
|
||||
|
||||
@@ -215,6 +215,10 @@ class CheckpointHook(Hook):
|
||||
# TODO a temp fix to avoid pipeline_name and task mismatch
|
||||
config['pipeline'] = {'type': config['task']}
|
||||
|
||||
# remove parallel module that is not JSON serializable
|
||||
if 'parallel' in config and 'module' in config['parallel']:
|
||||
del config['parallel']['module']
|
||||
|
||||
class SaveConfig:
|
||||
|
||||
def __init__(self, output_dir, config):
|
||||
@@ -422,4 +426,5 @@ class BestCkptSaverHook(CheckpointHook):
|
||||
|
||||
def after_run(self, trainer):
|
||||
if self.restore_best:
|
||||
self.load_checkpoint(self._best_ckpt_file, trainer)
|
||||
if is_master():
|
||||
self.load_checkpoint(self._best_ckpt_file, trainer)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Make sure to modify __release_datetime__ to release time when making official release.
|
||||
__version__ = '1.1.0'
|
||||
__version__ = '1.1.1'
|
||||
# default release datetime for branches under active development is set
|
||||
# to be a time far-far-away-into-the-future
|
||||
__release_datetime__ = '2022-11-30 16:13:12'
|
||||
__release_datetime__ = '2022-12-08 00:00:00'
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
easyasr>=0.0.2
|
||||
espnet==202204
|
||||
funasr>=0.1.3
|
||||
funasr>=0.1.4
|
||||
h5py
|
||||
inflect
|
||||
keras
|
||||
|
||||
9
setup.py
9
setup.py
@@ -195,8 +195,8 @@ if __name__ == '__main__':
|
||||
long_description_content_type='text/markdown',
|
||||
author='Alibaba ModelScope team',
|
||||
author_email='modelscope@list.alibaba-inc.com',
|
||||
keywords='',
|
||||
url='TBD',
|
||||
keywords='python,nlp,science,cv,speech,multi-modal',
|
||||
url='https://github.com/modelscope/modelscope',
|
||||
packages=find_packages(exclude=('configs', 'tools', 'demo')),
|
||||
include_package_data=True,
|
||||
classifiers=[
|
||||
@@ -204,9 +204,10 @@ if __name__ == '__main__':
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: OS Independent',
|
||||
'Programming Language :: Python :: 3',
|
||||
'Programming Language :: Python :: 3.5',
|
||||
'Programming Language :: Python :: 3.6',
|
||||
'Programming Language :: Python :: 3.7',
|
||||
'Programming Language :: Python :: 3.8',
|
||||
'Programming Language :: Python :: 3.9',
|
||||
'Programming Language :: Python :: 3.10',
|
||||
],
|
||||
license='Apache License 2.0',
|
||||
tests_require=parse_requirements('requirements/tests.txt'),
|
||||
|
||||
@@ -18,7 +18,7 @@ class TranslationEvaluationTest(unittest.TestCase, DemoCompatibilityCheck):
|
||||
|
||||
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
|
||||
def test_run_with_model_name_for_unite_large(self):
|
||||
input_dict = {
|
||||
input = {
|
||||
'hyp': [
|
||||
'This is a sentence.',
|
||||
'This is another sentence.',
|
||||
@@ -34,17 +34,17 @@ class TranslationEvaluationTest(unittest.TestCase, DemoCompatibilityCheck):
|
||||
}
|
||||
|
||||
pipeline_ins = pipeline(self.task, model=self.model_id_large)
|
||||
print(pipeline_ins(input_dict))
|
||||
print(pipeline_ins(input=input))
|
||||
|
||||
pipeline_ins.change_eval_mode(eval_mode=EvaluationMode.SRC)
|
||||
print(pipeline_ins(input_dict))
|
||||
print(pipeline_ins(input=input))
|
||||
|
||||
pipeline_ins.change_eval_mode(eval_mode=EvaluationMode.REF)
|
||||
print(pipeline_ins(input_dict))
|
||||
print(pipeline_ins(input=input))
|
||||
|
||||
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
|
||||
def test_run_with_model_name_for_unite_base(self):
|
||||
input_dict = {
|
||||
input = {
|
||||
'hyp': [
|
||||
'This is a sentence.',
|
||||
'This is another sentence.',
|
||||
@@ -60,13 +60,13 @@ class TranslationEvaluationTest(unittest.TestCase, DemoCompatibilityCheck):
|
||||
}
|
||||
|
||||
pipeline_ins = pipeline(self.task, model=self.model_id_base)
|
||||
print(pipeline_ins(input_dict))
|
||||
print(pipeline_ins(input=input))
|
||||
|
||||
pipeline_ins.change_eval_mode(eval_mode=EvaluationMode.SRC)
|
||||
print(pipeline_ins(input_dict))
|
||||
print(pipeline_ins(input=input))
|
||||
|
||||
pipeline_ins.change_eval_mode(eval_mode=EvaluationMode.REF)
|
||||
print(pipeline_ins(input_dict))
|
||||
print(pipeline_ins(input=input))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
Reference in New Issue
Block a user