diff --git a/.github/workflows/citest.yaml b/.github/workflows/citest.yaml index 5b0e61f2..1ff78a65 100644 --- a/.github/workflows/citest.yaml +++ b/.github/workflows/citest.yaml @@ -40,6 +40,7 @@ jobs: unittest: # The type of runner that the job will run on runs-on: [modelscope-self-hosted] + timeout-minutes: 240 steps: - name: ResetFileMode shell: bash diff --git a/MANIFEST.in b/MANIFEST.in index 3cd79b03..c1739719 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1,2 @@ recursive-include modelscope/configs *.py *.cu *.h *.cpp +recursive-include modelscope/cli/template *.tpl diff --git a/README.md b/README.md index c9b071ab..4a4ce792 100644 --- a/README.md +++ b/README.md @@ -203,12 +203,20 @@ To allow out-of-box usage for all the models on ModelScope, official docker imag CPU docker image ```shell -registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.3.0 +# py37 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.6.1 + +# py38 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-1.6.1 ``` GPU docker image ```shell -registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.3.0 +# py37 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1 + +# py38 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-1.6.1 ``` ## Setup Local Python Environment diff --git a/README_zh.md b/README_zh.md index 05d00730..f5401f33 100644 --- a/README_zh.md +++ b/README_zh.md @@ -189,12 +189,20 @@ ModelScope Library目前支持tensorflow,pytorch深度学习框架进行模型 CPU镜像 ```shell -registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.3.0 +# py37 
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.6.1 + +# py38 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-1.6.1 ``` GPU镜像 ```shell -registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.3.0 +# py37 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1 + +# py38 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-1.6.1 ``` ## 搭建本地Python环境 diff --git a/modelscope/cli/modelcard.py b/modelscope/cli/modelcard.py index 6c28d2de..5e2b6580 100644 --- a/modelscope/cli/modelcard.py +++ b/modelscope/cli/modelcard.py @@ -13,8 +13,8 @@ from modelscope.utils.logger import get_logger logger = get_logger() -curren_path = os.path.dirname(os.path.abspath(__file__)) -template_path = os.path.join(curren_path, 'template') +current_path = os.path.dirname(os.path.abspath(__file__)) +template_path = os.path.join(current_path, 'template') def subparser_func(args): diff --git a/modelscope/cli/pipeline.py b/modelscope/cli/pipeline.py index 2f34b786..793632e0 100644 --- a/modelscope/cli/pipeline.py +++ b/modelscope/cli/pipeline.py @@ -8,8 +8,8 @@ from modelscope.utils.logger import get_logger logger = get_logger() -curren_path = os.path.dirname(os.path.abspath(__file__)) -template_path = os.path.join(curren_path, 'template') +current_path = os.path.dirname(os.path.abspath(__file__)) +template_path = os.path.join(current_path, 'template') def subparser_func(args): diff --git a/modelscope/preprocessors/nlp/space/batch.py b/modelscope/preprocessors/nlp/space/batch.py index 7172b1ab..39adb021 100644 --- a/modelscope/preprocessors/nlp/space/batch.py +++ b/modelscope/preprocessors/nlp/space/batch.py @@ -50,7 +50,7 @@ def batch(reader, batch_size, drop_last=False): # Batch size check batch_size = int(batch_size) if 
batch_size <= 0: - raise ValueError('batch_size should be a positive integeral value, ' + raise ValueError('batch_size should be a positive integer value, ' 'but got batch_size={}'.format(batch_size)) return batch_reader diff --git a/modelscope/preprocessors/nlp/space/fields/intent_field.py b/modelscope/preprocessors/nlp/space/fields/intent_field.py index 29ea915e..99f37b24 100644 --- a/modelscope/preprocessors/nlp/space/fields/intent_field.py +++ b/modelscope/preprocessors/nlp/space/fields/intent_field.py @@ -344,7 +344,7 @@ class BPETextField(object): print(f'Saved {len(examples)} examples (elapsed {elapsed:.2f}s)') else: print(f"Saving examples to '(unknown)' ...") - raise ValueError(f'Unsport file format: (unknown)') + raise ValueError(f'Unsupported file format: (unknown)') def load_examples(self, filename): start = time.time() diff --git a/modelscope/tools/speech_tts_autolabel.py b/modelscope/tools/speech_tts_autolabel.py index a774c44f..535d73f6 100644 --- a/modelscope/tools/speech_tts_autolabel.py +++ b/modelscope/tools/speech_tts_autolabel.py @@ -39,7 +39,7 @@ def run_auto_label(input_wav, if not os.path.exists(work_dir): raise ValueError(f'work_dir: {work_dir} not exists') - def _download_and_unzip_resousrce(model, model_revision=None): + def _download_and_unzip_resource(model, model_revision=None): if os.path.exists(model): model_cache_dir = model if os.path.isdir( model) else os.path.dirname(model) @@ -52,7 +52,7 @@ def run_auto_label(input_wav, revision=model_revision, user_agent={ThirdParty.KEY: 'speech_tts_autolabel'}) if not os.path.exists(model_cache_dir): - raise ValueError(f'mdoel_cache_dir: {model_cache_dir} not exists') + raise ValueError(f'model_cache_dir: {model_cache_dir} not exists') zip_file = os.path.join(model_cache_dir, 'model.zip') if not os.path.exists(zip_file): raise ValueError(f'zip_file: {zip_file} not exists') @@ -61,8 +61,8 @@ def run_auto_label(input_wav, target_resource = os.path.join(model_cache_dir, 'model') return 
target_resource - model_resource = _download_and_unzip_resousrce(resource_model_id, - resource_revision) + model_resource = _download_and_unzip_resource(resource_model_id, + resource_revision) auto_labeling = AutoLabeling( os.path.abspath(input_wav), model_resource, diff --git a/tests/pipelines/test_controllable_image_generation.py b/tests/pipelines/test_controllable_image_generation.py index 7d6b03ce..c1a29f5b 100644 --- a/tests/pipelines/test_controllable_image_generation.py +++ b/tests/pipelines/test_controllable_image_generation.py @@ -1,4 +1,5 @@ # Copyright (c) Alibaba, Inc. and its affiliates. +import random import tempfile import unittest @@ -27,40 +28,13 @@ class ControllableImageGenerationTest(unittest.TestCase): @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_run_with_model_from_modelhub(self): output_image_path = tempfile.NamedTemporaryFile(suffix='.png').name + control_types = [ + 'canny', 'hough', 'hed', 'depth', 'normal', 'pose', 'seg', + 'fake_scribble', 'scribble' + ] + control_type = random.choice(control_types) pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='canny') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='hough') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='hed') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='depth') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='normal') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='pose') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = 
pipeline( - self.task, model=self.model_id, control_type='seg') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='fake_scribble') - output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] - - pipeline_ins = pipeline( - self.task, model=self.model_id, control_type='scribble') + self.task, model=self.model_id, control_type=control_type) output = pipeline_ins(input=self.input)[OutputKeys.OUTPUT_IMG] cv2.imwrite(output_image_path, output) print(