mirror of
https://github.com/modelscope/modelscope.git
synced 2025-12-16 16:27:45 +01:00
fix ci compatible issues,fix llmpipeline lazy import issue (#725)
* fix ci issue * fix case issue * modify lint to python3.10 * fix case issue --------- Co-authored-by: mulin.lyh <mulin.lyh@taobao.com>
This commit is contained in:
@@ -177,13 +177,6 @@ else
|
||||
# pre compile extension
|
||||
docker_file_content="${docker_file_content} \nRUN pip uninstall -y tb-nightly && pip install --no-cache-dir -U tensorboard && TORCH_CUDA_ARCH_LIST='6.0 6.1 7.0 7.5 8.0 8.9 9.0 8.6+PTX' python -c 'from modelscope.utils.pre_compile import pre_compile_all;pre_compile_all()'"
|
||||
fi
|
||||
# install here for easycv extension conflict.
|
||||
docker_file_content="${docker_file_content} \nRUN if [ \"$USE_GPU\" = \"True\" ] ; then \
|
||||
bash /tmp/install_tiny_cuda_nn.sh; \
|
||||
else \
|
||||
echo 'cpu unsupport tiny_cuda_nn'; \
|
||||
fi"
|
||||
|
||||
if [ "$is_ci_test" == "True" ]; then
|
||||
echo "Building CI image, uninstall modelscope"
|
||||
docker_file_content="${docker_file_content} \nRUN pip uninstall modelscope -y"
|
||||
|
||||
4
.github/workflows/lint.yaml
vendored
4
.github/workflows/lint.yaml
vendored
@@ -11,10 +11,10 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python 3.7
|
||||
- name: Set up Python 3.10
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7
|
||||
python-version: '3.10'
|
||||
- name: Install pre-commit hook
|
||||
run: |
|
||||
pip install pre-commit
|
||||
|
||||
@@ -34,10 +34,13 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
|
||||
fi
|
||||
|
||||
# torchmetrics==0.11.4 for ofa
|
||||
# tinycudann for cuda12.1.0 pytorch 2.1.2
|
||||
RUN if [ "$USE_GPU" = "True" ] ; then \
|
||||
pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr optimum && \
|
||||
pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu121/ && \
|
||||
pip install --no-cache-dir -U xformers --index-url https://download.pytorch.org/whl/cu121 && \
|
||||
pip install --no-cache-dir --force https://modelscope.oss-cn-beijing.aliyuncs.com/packages/tinycudann-1.7-cp310-cp310-linux_x86_64.whl && \
|
||||
pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter && \
|
||||
pip install --no-cache-dir -U flash_attn vllm; \
|
||||
else \
|
||||
echo 'cpu unsupport vllm auto-gptq'; \
|
||||
|
||||
@@ -17,11 +17,11 @@ import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from diffusers.models.activations import get_activation
|
||||
from diffusers.models.attention import AdaGroupNorm
|
||||
from diffusers.models.attention_processor import (Attention,
|
||||
AttnAddedKVProcessor,
|
||||
AttnAddedKVProcessor2_0)
|
||||
from diffusers.models.dual_transformer_2d import DualTransformer2DModel
|
||||
from diffusers.models.normalization import AdaLayerNorm
|
||||
from diffusers.models.resnet import (Downsample2D, FirDownsample2D,
|
||||
FirUpsample2D, KDownsample2D, KUpsample2D,
|
||||
ResnetBlock2D, Upsample2D)
|
||||
|
||||
@@ -9,8 +9,7 @@ import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from mmcv.cnn import ConvModule
|
||||
from timm.models.layers.drop import drop_path
|
||||
from timm.models.layers.weight_init import trunc_normal_
|
||||
from timm.models.layers import drop_path, trunc_normal_
|
||||
|
||||
from .common import Upsample, resize
|
||||
|
||||
|
||||
@@ -11,8 +11,7 @@ from collections import OrderedDict
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torch.utils.checkpoint as checkpoint
|
||||
from timm.models.layers.drop import drop_path
|
||||
from timm.models.layers.weight_init import trunc_normal_
|
||||
from timm.models.layers import drop_path, trunc_normal_
|
||||
from torch import nn
|
||||
|
||||
|
||||
|
||||
@@ -8,8 +8,7 @@
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from mmcv.cnn import ConvModule
|
||||
from timm.models.layers.drop import drop_path
|
||||
from timm.models.layers.weight_init import trunc_normal_
|
||||
from timm.models.layers import drop_path, trunc_normal_
|
||||
|
||||
from .common import resize
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ def setup_model(args):
|
||||
if args.load_pretrained is not None:
|
||||
args.no_load_optim = True
|
||||
args.load = args.load_pretrained
|
||||
args.no_load_rng = True
|
||||
_ = load_checkpoint(model, None, None, args)
|
||||
|
||||
return model
|
||||
|
||||
@@ -224,7 +224,7 @@ def llm_first_checker(model: Union[str, List[str], Model, List[Model]],
|
||||
|
||||
|
||||
def clear_llm_info(kwargs: Dict):
|
||||
from .nlp.llm_pipeline import ModelTypeHelper
|
||||
from modelscope.utils.model_type_helper import ModelTypeHelper
|
||||
|
||||
kwargs.pop('llm_first', None)
|
||||
ModelTypeHelper.clear_cache()
|
||||
|
||||
@@ -19,72 +19,11 @@ from modelscope.pipelines.util import is_model, is_official_hub_path
|
||||
from modelscope.utils.config import Config
|
||||
from modelscope.utils.constant import Invoke, ModelFile, Tasks
|
||||
from modelscope.utils.logger import get_logger
|
||||
from modelscope.utils.model_type_helper import ModelTypeHelper
|
||||
|
||||
logger = get_logger()
|
||||
|
||||
|
||||
class ModelTypeHelper:
|
||||
|
||||
current_model_type = None
|
||||
|
||||
@staticmethod
|
||||
def _get_file_name(model: str, cfg_name: str,
|
||||
revision: Optional[str]) -> Optional[str]:
|
||||
if osp.exists(model):
|
||||
return osp.join(model, cfg_name)
|
||||
try:
|
||||
return model_file_download(model, cfg_name, revision=revision)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _parse_and_get(file: Optional[str], pattern: str) -> Optional[str]:
|
||||
if file is None or not osp.exists(file):
|
||||
return None
|
||||
return Config.from_file(file).safe_get(pattern)
|
||||
|
||||
@classmethod
|
||||
def _get(cls, model: str, revision: Optional[str]) -> Optional[str]:
|
||||
cfg_file = cls._get_file_name(model, ModelFile.CONFIGURATION, revision)
|
||||
hf_cfg_file = cls._get_file_name(model, ModelFile.CONFIG, revision)
|
||||
cfg_model_type = cls._parse_and_get(cfg_file, 'model.type')
|
||||
hf_cfg_model_type = cls._parse_and_get(hf_cfg_file, 'model_type')
|
||||
return cfg_model_type or hf_cfg_model_type
|
||||
|
||||
@classmethod
|
||||
def _get_adapter(cls, model: str,
|
||||
revision: Optional[str]) -> Optional[str]:
|
||||
cfg_file = cls._get_file_name(model, ModelFile.CONFIGURATION, revision)
|
||||
model = cls._parse_and_get(cfg_file, 'adapter_cfg.model_id_or_path')
|
||||
revision = cls._parse_and_get(cfg_file, 'adapter_cfg.model_revision')
|
||||
return None if model is None else cls._get(model, revision)
|
||||
|
||||
@classmethod
|
||||
def get(cls,
|
||||
model: str,
|
||||
revision: Optional[str] = None,
|
||||
with_adapter: bool = False,
|
||||
split: Optional[str] = None,
|
||||
use_cache: bool = False) -> Optional[str]:
|
||||
if use_cache and cls.current_model_type:
|
||||
return cls.current_model_type
|
||||
model_type = cls._get(model, revision)
|
||||
if model_type is None and with_adapter:
|
||||
model_type = cls._get_adapter(model, revision)
|
||||
if model_type is None:
|
||||
return None
|
||||
model_type = model_type.lower()
|
||||
if split is not None:
|
||||
model_type = model_type.split(split)[0]
|
||||
if use_cache:
|
||||
cls.current_model_type = model_type
|
||||
return model_type
|
||||
|
||||
@classmethod
|
||||
def clear_cache(cls):
|
||||
cls.current_model_type = None
|
||||
|
||||
|
||||
class LLMAdapterRegistry:
|
||||
|
||||
llm_format_map = {'qwen': [None, None, None]}
|
||||
|
||||
@@ -56,7 +56,7 @@ class OfaASRPreprocessor(OfaBasePreprocessor):
|
||||
def _build_train_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
speed = random.choice([0.9, 1.0, 1.1])
|
||||
audio_bytes = self.get_audio_bytes(data[self.column_map['wav']])
|
||||
wav, sr = librosa.load(audio_bytes, 16000, mono=True)
|
||||
wav, sr = librosa.load(audio_bytes, sr=16000, mono=True)
|
||||
fbank = self.prepare_fbank(
|
||||
torch.tensor([wav], dtype=torch.float32),
|
||||
sr,
|
||||
@@ -94,7 +94,7 @@ class OfaASRPreprocessor(OfaBasePreprocessor):
|
||||
def _build_infer_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
speed = 1.0
|
||||
audio_bytes = self.get_audio_bytes(data[self.column_map['wav']])
|
||||
wav, sr = librosa.load(audio_bytes, 16000, mono=True)
|
||||
wav, sr = librosa.load(audio_bytes, sr=16000, mono=True)
|
||||
fbank = self.prepare_fbank(
|
||||
torch.tensor([wav], dtype=torch.float32),
|
||||
sr,
|
||||
|
||||
68
modelscope/utils/model_type_helper.py
Normal file
68
modelscope/utils/model_type_helper.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import os.path as osp
|
||||
from typing import Optional
|
||||
|
||||
from modelscope.hub.file_download import model_file_download
|
||||
from modelscope.utils.config import Config
|
||||
from modelscope.utils.constant import ModelFile
|
||||
|
||||
|
||||
class ModelTypeHelper:
|
||||
|
||||
current_model_type = None
|
||||
|
||||
@staticmethod
|
||||
def _get_file_name(model: str, cfg_name: str,
|
||||
revision: Optional[str]) -> Optional[str]:
|
||||
if osp.exists(model):
|
||||
return osp.join(model, cfg_name)
|
||||
try:
|
||||
return model_file_download(model, cfg_name, revision=revision)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _parse_and_get(file: Optional[str], pattern: str) -> Optional[str]:
|
||||
if file is None or not osp.exists(file):
|
||||
return None
|
||||
return Config.from_file(file).safe_get(pattern)
|
||||
|
||||
@classmethod
|
||||
def _get(cls, model: str, revision: Optional[str]) -> Optional[str]:
|
||||
cfg_file = cls._get_file_name(model, ModelFile.CONFIGURATION, revision)
|
||||
hf_cfg_file = cls._get_file_name(model, ModelFile.CONFIG, revision)
|
||||
cfg_model_type = cls._parse_and_get(cfg_file, 'model.type')
|
||||
hf_cfg_model_type = cls._parse_and_get(hf_cfg_file, 'model_type')
|
||||
return cfg_model_type or hf_cfg_model_type
|
||||
|
||||
@classmethod
|
||||
def _get_adapter(cls, model: str,
|
||||
revision: Optional[str]) -> Optional[str]:
|
||||
cfg_file = cls._get_file_name(model, ModelFile.CONFIGURATION, revision)
|
||||
model = cls._parse_and_get(cfg_file, 'adapter_cfg.model_id_or_path')
|
||||
revision = cls._parse_and_get(cfg_file, 'adapter_cfg.model_revision')
|
||||
return None if model is None else cls._get(model, revision)
|
||||
|
||||
@classmethod
|
||||
def get(cls,
|
||||
model: str,
|
||||
revision: Optional[str] = None,
|
||||
with_adapter: bool = False,
|
||||
split: Optional[str] = None,
|
||||
use_cache: bool = False) -> Optional[str]:
|
||||
if use_cache and cls.current_model_type:
|
||||
return cls.current_model_type
|
||||
model_type = cls._get(model, revision)
|
||||
if model_type is None and with_adapter:
|
||||
model_type = cls._get_adapter(model, revision)
|
||||
if model_type is None:
|
||||
return None
|
||||
model_type = model_type.lower()
|
||||
if split is not None:
|
||||
model_type = model_type.split(split)[0]
|
||||
if use_cache:
|
||||
cls.current_model_type = model_type
|
||||
return model_type
|
||||
|
||||
@classmethod
|
||||
def clear_cache(cls):
|
||||
cls.current_model_type = None
|
||||
@@ -104,7 +104,7 @@ def download_and_untar(fpath, furl, dst) -> str:
|
||||
|
||||
def get_case_model_info():
|
||||
status_code, result = subprocess.getstatusoutput(
|
||||
'grep -rn "damo/" tests/ | grep -v ".pyc" | grep -v "Binary file" | grep -v run.py '
|
||||
'grep -rn "damo/" tests/ | grep -v "*.pyc" | grep -v "Binary file" | grep -v run.py '
|
||||
)
|
||||
lines = result.split('\n')
|
||||
test_cases = OrderedDict()
|
||||
@@ -116,7 +116,6 @@ def get_case_model_info():
|
||||
test_file = elements[0]
|
||||
model_pos = line.find('damo')
|
||||
if model_pos == -1 or (model_pos - 1) > len(line):
|
||||
print('Processing line: %s failed' % line)
|
||||
continue
|
||||
left_quote = line[model_pos - 1]
|
||||
rquote_idx = line.rfind(left_quote)
|
||||
|
||||
@@ -9,6 +9,8 @@ import uuid
|
||||
from modelscope.hub.api import HubApi
|
||||
from modelscope.utils.test_utils import TEST_ACCESS_TOKEN1, TEST_MODEL_ORG
|
||||
|
||||
os.environ['MKL_THREADING_LAYER'] = 'GNU'
|
||||
|
||||
|
||||
class ModelUploadCMDTest(unittest.TestCase):
|
||||
|
||||
|
||||
@@ -24,7 +24,9 @@ class TestExportFaceDetectionSCRFD(unittest.TestCase):
|
||||
os.makedirs(self.tmp_dir)
|
||||
self.model_id = 'damo/cv_resnet_facedetection_scrfd10gkps'
|
||||
|
||||
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
|
||||
@unittest.skipUnless(
|
||||
test_level() >= 1,
|
||||
'Skip for export issue of not <protocol "torch.Tensor"> or tuple ')
|
||||
def test_export_face_detection_scrfd(self):
|
||||
model = Model.from_pretrained(self.model_id)
|
||||
print(Exporter.from_model(model).export_onnx(output_dir=self.tmp_dir))
|
||||
|
||||
@@ -15,9 +15,9 @@ class AnydoorTest(unittest.TestCase):
|
||||
|
||||
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
|
||||
def test_run(self):
|
||||
ref_image = 'https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_anydoor_fg.png'
|
||||
ref_image = 'https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_anydoor_fg.jpg'
|
||||
ref_mask = 'https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_anydoor_fg_mask.png'
|
||||
bg_image = 'https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_anydoor_bg.jpg'
|
||||
bg_image = 'https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_anydoor_bg.png'
|
||||
bg_mask = 'https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_anydoor_bg_mask.png'
|
||||
save_path = 'data/test/images/image_anydoor_gen.png'
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ class CustomPipelineTest(unittest.TestCase):
|
||||
**kwargs):
|
||||
super().__init__(config_file, model, preprocessor, **kwargs)
|
||||
|
||||
with self.assertRaises(TypeError):
|
||||
with self.assertRaises(AttributeError):
|
||||
CustomPipeline1()
|
||||
|
||||
def test_batch(self):
|
||||
|
||||
@@ -31,7 +31,9 @@ class ImageTo3DTest(unittest.TestCase):
|
||||
np_content = np.concatenate(np_content, axis=1)
|
||||
Image.fromarray(np_content).save('./concat.png')
|
||||
|
||||
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
|
||||
@unittest.skipUnless(
|
||||
test_level() >= 1,
|
||||
'skip for no test data: data/test/images/basketball.png')
|
||||
def test_run_modelhub(self):
|
||||
image_to_3d = pipeline(
|
||||
Tasks.image_to_3d, model=self.model_id, revision='v1.0.1')
|
||||
|
||||
@@ -438,6 +438,10 @@ def run_in_subprocess(args):
|
||||
'test_hub_revision.py',
|
||||
'test_hub_revision_release_mode.py',
|
||||
'test_hub_upload.py',
|
||||
'test_custom_pipeline_cmd.py',
|
||||
'test_download_cmd.py',
|
||||
'test_modelcard_cmd.py',
|
||||
'test_plugins_cmd.py',
|
||||
]
|
||||
test_suite_files = [
|
||||
x for x in test_suite_files if x not in non_parallelizable_suites
|
||||
@@ -501,10 +505,7 @@ class TimeCostTextTestResult(TextTestResult):
|
||||
self.stream.writeln(
|
||||
'Test case: %s stop at: %s, cost time: %s(seconds)' %
|
||||
(test.test_full_name, test.stop_time, test.time_cost))
|
||||
if torch.cuda.is_available(
|
||||
) and test.time_cost > 5.0: # print nvidia-smi
|
||||
cmd = ['nvidia-smi']
|
||||
run_command_with_popen(cmd)
|
||||
|
||||
super(TimeCostTextTestResult, self).stopTest(test)
|
||||
|
||||
def addSuccess(self, test):
|
||||
|
||||
Reference in New Issue
Block a user