From 496b103abe38a3f07584590171f07a5e0e8fceda Mon Sep 17 00:00:00 2001 From: suluyan Date: Tue, 7 Jan 2025 21:16:44 +0800 Subject: [PATCH] fix lint --- .../models/nlp/hf_transformers/backbone.py | 7 +++-- .../models/nlp/polylm/text_generation.py | 7 +++-- .../msdatasets/data_loader/data_loader.py | 8 +++-- .../data_loader/data_loader_manager.py | 16 ++++++---- modelscope/msdatasets/ms_dataset.py | 7 +++-- modelscope/pipelines/accelerate/vllm.py | 11 ++++--- .../pipelines/multi_modal/ovis_vl_pipeline.py | 9 ++++-- modelscope/pipelines/nlp/llm_pipeline.py | 29 +++++++++++-------- .../pipelines/nlp/text_generation_pipeline.py | 29 +++++++++++-------- modelscope/utils/automodel_utils.py | 11 ++++--- modelscope/utils/plugins.py | 6 ++-- 11 files changed, 85 insertions(+), 55 deletions(-) diff --git a/modelscope/models/nlp/hf_transformers/backbone.py b/modelscope/models/nlp/hf_transformers/backbone.py index 10681d8b..8cb368f0 100644 --- a/modelscope/models/nlp/hf_transformers/backbone.py +++ b/modelscope/models/nlp/hf_transformers/backbone.py @@ -99,9 +99,10 @@ class TransformersModel(TorchModel, PreTrainedModel): return model # return the model only - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. 
Please make sure that' + f' the remote code content is what you need {model_dir}.') config, kwargs = AutoConfig.from_pretrained( model_dir, return_unused_kwargs=True, diff --git a/modelscope/models/nlp/polylm/text_generation.py b/modelscope/models/nlp/polylm/text_generation.py index cf53157a..bdd6e991 100644 --- a/modelscope/models/nlp/polylm/text_generation.py +++ b/modelscope/models/nlp/polylm/text_generation.py @@ -27,9 +27,10 @@ class PolyLMForTextGeneration(TorchModel, StreamingOutputMixin): super().__init__(model_dir, *args, **kwargs) self.tokenizer = AutoTokenizer.from_pretrained( model_dir, legacy=False, use_fast=False) - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model_dir}.') self.model = AutoModelForCausalLM.from_pretrained( model_dir, device_map='auto', trust_remote_code=True) self.model.eval() diff --git a/modelscope/msdatasets/data_loader/data_loader.py b/modelscope/msdatasets/data_loader/data_loader.py index 9e1583f4..c1e374af 100644 --- a/modelscope/msdatasets/data_loader/data_loader.py +++ b/modelscope/msdatasets/data_loader/data_loader.py @@ -134,9 +134,11 @@ class OssDownloader(BaseDownloader): if dataset_py_script and dataset_formation == DatasetFormations.hf_compatible: if trust_remote_code: - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {dataset_name}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {dataset_name}.' 
+                ) self.dataset = hf_load_dataset( dataset_py_script, diff --git a/modelscope/msdatasets/data_loader/data_loader_manager.py b/modelscope/msdatasets/data_loader/data_loader_manager.py index d59fc1d6..99315a31 100644 --- a/modelscope/msdatasets/data_loader/data_loader_manager.py +++ b/modelscope/msdatasets/data_loader/data_loader_manager.py @@ -72,9 +72,11 @@ class LocalDataLoaderManager(DataLoaderManager): # TODO: more loaders to be supported. if data_loader_type == LocalDataLoaderType.HF_DATA_LOADER: if trust_remote_code: - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {dataset_name}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {dataset_name}.' + ) # Build huggingface data loader and return dataset. return hf_data_loader( @@ -116,9 +118,11 @@ class RemoteDataLoaderManager(DataLoaderManager): # To use the huggingface data loader if data_loader_type == RemoteDataLoaderType.HF_DATA_LOADER: if trust_remote_code: - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {dataset_name}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {dataset_name}.' + ) dataset_ret = hf_data_loader( dataset_name, name=subset_name, diff --git a/modelscope/msdatasets/ms_dataset.py b/modelscope/msdatasets/ms_dataset.py index 28de17f6..0a259ac5 100644 --- a/modelscope/msdatasets/ms_dataset.py +++ b/modelscope/msdatasets/ms_dataset.py @@ -238,9 +238,10 @@ class MsDataset: raise 'The dataset_name should be in the form of `namespace/dataset_name` or `dataset_name`.' 
if trust_remote_code: - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {dataset_name}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {dataset_name}.') # Init context config dataset_context_config = DatasetContextConfig( diff --git a/modelscope/pipelines/accelerate/vllm.py b/modelscope/pipelines/accelerate/vllm.py index 599f520a..76569546 100644 --- a/modelscope/pipelines/accelerate/vllm.py +++ b/modelscope/pipelines/accelerate/vllm.py @@ -1,10 +1,12 @@ from typing import List, Union +from modelscope import get_logger from modelscope.pipelines.accelerate.base import InferFramework from modelscope.utils.import_utils import is_vllm_available -from modelscope import get_logger + logger = get_logger() + class Vllm(InferFramework): def __init__(self, @@ -28,9 +30,10 @@ class Vllm(InferFramework): if not Vllm.check_gpu_compatibility(8) and (dtype in ('bfloat16', 'auto')): dtype = 'float16' - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {self.model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. 
Please make sure that' + f' the remote code content is what you need {self.model_dir}.') self.model = LLM( self.model_dir, dtype=dtype, diff --git a/modelscope/pipelines/multi_modal/ovis_vl_pipeline.py b/modelscope/pipelines/multi_modal/ovis_vl_pipeline.py index 5e1e18a9..cffe3741 100644 --- a/modelscope/pipelines/multi_modal/ovis_vl_pipeline.py +++ b/modelscope/pipelines/multi_modal/ovis_vl_pipeline.py @@ -12,8 +12,10 @@ from modelscope.pipelines.multi_modal.visual_question_answering_pipeline import VisualQuestionAnsweringPipeline from modelscope.preprocessors import Preprocessor, load_image from modelscope.utils.constant import Fields, Frameworks, Tasks + logger = get_logger() + @PIPELINES.register_module( Tasks.visual_question_answering, module_name='ovis-vl') class VisionChatPipeline(VisualQuestionAnsweringPipeline): @@ -35,9 +37,10 @@ class VisionChatPipeline(VisualQuestionAnsweringPipeline): torch_dtype = kwargs.get('torch_dtype', torch.float16) multimodal_max_length = kwargs.get('multimodal_max_length', 8192) self.device = 'cuda' if device == 'gpu' else device - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model}.') self.model = AutoModelForCausalLM.from_pretrained( model, torch_dtype=torch_dtype, diff --git a/modelscope/pipelines/nlp/llm_pipeline.py b/modelscope/pipelines/nlp/llm_pipeline.py index ceaa4c90..9789de40 100644 --- a/modelscope/pipelines/nlp/llm_pipeline.py +++ b/modelscope/pipelines/nlp/llm_pipeline.py @@ -97,9 +97,10 @@ class LLMPipeline(Pipeline, PipelineStreamingOutputMixin): assert base_model is not None, 'Cannot get adapter_cfg.model_id_or_path from configuration.json file.' 
revision = self.cfg.safe_get('adapter_cfg.model_revision', 'master') - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {base_model}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {base_model}.') base_model = Model.from_pretrained( base_model, revision, @@ -137,9 +138,10 @@ class LLMPipeline(Pipeline, PipelineStreamingOutputMixin): model) else snapshot_download(model) # TODO: Temporary use of AutoModelForCausalLM # Need to be updated into a universal solution - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model_dir}.') model = AutoModelForCausalLM.from_pretrained( model_dir, device_map=self.device_map, @@ -179,9 +181,11 @@ class LLMPipeline(Pipeline, PipelineStreamingOutputMixin): self.llm_framework = llm_framework if os.path.exists(kwargs['model']): - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {kwargs['model']}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {kwargs["model"]}.' 
+                ) config = AutoConfig.from_pretrained( kwargs['model'], trust_remote_code=True) q_config = config.__dict__.get('quantization_config', None) @@ -432,9 +436,10 @@ class LLMPipeline(Pipeline, PipelineStreamingOutputMixin): model_dir = self.model.model_dir if tokenizer_class is None: tokenizer_class = AutoTokenizer - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model_dir}.') return tokenizer_class.from_pretrained( model_dir, trust_remote_code=True) diff --git a/modelscope/pipelines/nlp/text_generation_pipeline.py b/modelscope/pipelines/nlp/text_generation_pipeline.py index 555e7a9d..b8de7df8 100644 --- a/modelscope/pipelines/nlp/text_generation_pipeline.py +++ b/modelscope/pipelines/nlp/text_generation_pipeline.py @@ -269,9 +269,10 @@ class ChatGLM6bV2TextGenerationPipeline(Pipeline): if use_bf16: default_torch_dtype = torch.bfloat16 torch_dtype = kwargs.get('torch_dtype', default_torch_dtype) - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model_dir}.') model = Model.from_pretrained( model_dir, trust_remote_code=True, @@ -288,9 +289,11 @@ class ChatGLM6bV2TextGenerationPipeline(Pipeline): self.model = model self.model.eval() - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. 
Please make sure that' - f' the remote code content is what you need {self.model.model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {self.model.model_dir}.' + ) self.tokenizer = AutoTokenizer.from_pretrained( self.model.model_dir, trust_remote_code=True) @@ -334,9 +337,10 @@ class QWenChatPipeline(Pipeline): bf16 = False if isinstance(model, str): - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model}.') self.tokenizer = AutoTokenizer.from_pretrained( model, revision=revision, trust_remote_code=True) self.model = AutoModelForCausalLM.from_pretrained( @@ -401,9 +405,10 @@ class QWenTextGenerationPipeline(Pipeline): bf16 = False if isinstance(model, str): - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. 
Please make sure that' + f' the remote code content is what you need {model}.') self.model = AutoModelForCausalLM.from_pretrained( model, device_map=device_map, diff --git a/modelscope/utils/automodel_utils.py b/modelscope/utils/automodel_utils.py index 435d461d..70257b63 100644 --- a/modelscope/utils/automodel_utils.py +++ b/modelscope/utils/automodel_utils.py @@ -3,14 +3,16 @@ import os from types import MethodType from typing import Any, Optional +from modelscope import get_logger from modelscope.metainfo import Tasks from modelscope.utils.ast_utils import INDEX_KEY from modelscope.utils.import_utils import (LazyImportModule, is_torch_available, is_transformers_available) -from modelscope import get_logger + logger = get_logger() + def can_load_by_ms(model_dir: str, task_name: Optional[str], model_type: Optional[str]) -> bool: if model_type is None or task_name is None: @@ -92,9 +94,10 @@ def get_hf_automodel_class(model_dir: str, if not os.path.exists(config_path): return None try: - logger.warning('Use trust_remote_code=True. The code will be downloaded' - ' and used from the remote repo. Please make sure that' - f' the remote code content is what you need {model_dir}.') + logger.warning( + 'Use trust_remote_code=True. The code will be downloaded' + ' and used from the remote repo. Please make sure that' + f' the remote code content is what you need {model_dir}.') config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True) if task_name is None: automodel_class = get_default_automodel(config) diff --git a/modelscope/utils/plugins.py b/modelscope/utils/plugins.py index f78d91c5..c99fc2b0 100644 --- a/modelscope/utils/plugins.py +++ b/modelscope/utils/plugins.py @@ -451,8 +451,10 @@ def register_plugins_repo(plugins: List[str]) -> None: def register_modelhub_repo(model_dir, allow_remote=False) -> None: """ Try to install and import remote model from modelhub""" if allow_remote: - logger.warning('Use allow_remote=True. 
The code will be downloaded and used from the remote repo.' - f' Please make sure that the remote code content is what you need {model_dir}.') + logger.warning( + 'Use allow_remote=True. The code will be downloaded and used from the remote repo.' + f' Please make sure that the remote code content is what you need {model_dir}.' + ) try: import_module_from_model_dir(model_dir) except KeyError: