From 6d9e6d57c02ba2d6b5ea4bf0e3e35e3d9524c433 Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Mon, 25 Nov 2024 22:16:05 +0800 Subject: [PATCH] More automodel (#1098) * add more hf alias --------- Co-authored-by: Yingda Chen --- modelscope/__init__.py | 18 ++++++++++----- modelscope/utils/hf_util.py | 45 ++++++++++++++++++++++++++++++++----- 2 files changed, 52 insertions(+), 11 deletions(-) diff --git a/modelscope/__init__.py b/modelscope/__init__.py index d60a8c79..80cd861a 100644 --- a/modelscope/__init__.py +++ b/modelscope/__init__.py @@ -36,9 +36,12 @@ if TYPE_CHECKING: from .utils.hf_util import ( AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, - AutoModelForTokenClassification, AutoModelForImageSegmentation, - AutoTokenizer, GenerationConfig, AutoImageProcessor, BatchFeature, - T5EncoderModel) + AutoModelForTokenClassification, AutoModelForImageClassification, + AutoModelForImageToImage, AutoModelForImageSegmentation, + AutoModelForQuestionAnswering, AutoModelForMaskedLM, AutoTokenizer, + AutoModelForMaskGeneration, AutoModelForPreTraining, + AutoModelForTextEncoding, GenerationConfig, AutoImageProcessor, + BatchFeature, T5EncoderModel) else: print( 'transformer is not installed, please install it if you want to use related modules' ) @@ -96,8 +99,13 @@ else: 'AwqConfig', 'BitsAndBytesConfig', 'AutoModelForCausalLM', 'AutoModelForSeq2SeqLM', 'AutoTokenizer', 'AutoModelForSequenceClassification', - 'AutoModelForTokenClassification', 'AutoModelForImageSegmentation', - 'AutoImageProcessor', 'BatchFeature', 'T5EncoderModel' + 'AutoModelForTokenClassification', + 'AutoModelForImageClassification', 'AutoModelForImageToImage', + 'AutoModelForQuestionAnswering', 'AutoModelForMaskedLM', + 'AutoModelForMaskGeneration', 'AutoModelForPreTraining', + 'AutoModelForTextEncoding', + 'AutoModelForImageSegmentation', 'AutoImageProcessor', + 'BatchFeature', 'T5EncoderModel' ] import sys diff --git 
a/modelscope/utils/hf_util.py b/modelscope/utils/hf_util.py index 9d517724..a67b7886 100644 --- a/modelscope/utils/hf_util.py +++ b/modelscope/utils/hf_util.py @@ -9,11 +9,21 @@ from transformers import AutoFeatureExtractor as AutoFeatureExtractorHF from transformers import AutoImageProcessor as AutoImageProcessorHF from transformers import AutoModel as AutoModelHF from transformers import AutoModelForCausalLM as AutoModelForCausalLMHF +from transformers import \ + AutoModelForImageClassification as AutoModelForImageClassificationHF from transformers import \ AutoModelForImageSegmentation as AutoModelForImageSegmentationHF +from transformers import AutoModelForImageToImage as AutoModelForImageToImageHF +from transformers import AutoModelForMaskedLM as AutoModelForMaskedLMHF +from transformers import \ + AutoModelForMaskGeneration as AutoModelForMaskGenerationHF +from transformers import AutoModelForPreTraining as AutoModelForPreTrainingHF +from transformers import \ + AutoModelForQuestionAnswering as AutoModelForQuestionAnsweringHF from transformers import AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLMHF from transformers import \ AutoModelForSequenceClassification as AutoModelForSequenceClassificationHF +from transformers import AutoModelForTextEncoding as AutoModelForTextEncodingHF from transformers import \ AutoModelForTokenClassification as AutoModelForTokenClassificationHF from transformers import AutoProcessor as AutoProcessorHF @@ -272,7 +282,7 @@ def get_wrapped_class(module_class, ignore_file_pattern = kwargs.pop('ignore_file_pattern', default_ignore_file_pattern) subfolder = kwargs.pop('subfolder', default_file_filter) - + file_filter = None if subfolder: file_filter = f'{subfolder}/*' if not os.path.exists(pretrained_model_name_or_path): @@ -315,25 +325,48 @@ AutoModelForTokenClassification = get_wrapped_class( AutoModelForTokenClassificationHF) AutoModelForImageSegmentation = get_wrapped_class( AutoModelForImageSegmentationHF) 
+AutoModelForImageClassification = get_wrapped_class( + AutoModelForImageClassificationHF) +AutoModelForImageToImage = get_wrapped_class(AutoModelForImageToImageHF) +AutoModelForQuestionAnswering = get_wrapped_class( + AutoModelForQuestionAnsweringHF) +AutoModelForMaskedLM = get_wrapped_class(AutoModelForMaskedLMHF) +AutoModelForMaskGeneration = get_wrapped_class(AutoModelForMaskGenerationHF) +AutoModelForPreTraining = get_wrapped_class(AutoModelForPreTrainingHF) +AutoModelForTextEncoding = get_wrapped_class(AutoModelForTextEncodingHF) T5EncoderModel = get_wrapped_class(T5EncoderModelHF) AutoTokenizer = get_wrapped_class( AutoTokenizerHF, ignore_file_pattern=[ - r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt' + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) +AutoProcessor = get_wrapped_class( + AutoProcessorHF, + ignore_file_pattern=[ + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' ]) AutoConfig = get_wrapped_class( AutoConfigHF, ignore_file_pattern=[ - r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt' + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' ]) GenerationConfig = get_wrapped_class( GenerationConfigHF, ignore_file_pattern=[ - r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt' + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' ]) +BitsAndBytesConfig = get_wrapped_class( + BitsAndBytesConfigHF, + ignore_file_pattern=[ + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) +AutoImageProcessor = get_wrapped_class( + AutoImageProcessorHF, + ignore_file_pattern=[ + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) + GPTQConfig = GPTQConfigHF AwqConfig = AwqConfigHF -BitsAndBytesConfig = BitsAndBytesConfigHF -AutoImageProcessor = get_wrapped_class(AutoImageProcessorHF) BatchFeature = get_wrapped_class(BatchFeatureHF)