From 27bf8fab1e43bb4408dc368f29675db454f8c1aa Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Mon, 25 Nov 2024 12:14:47 +0800 Subject: [PATCH] add more hf alias --- modelscope/__init__.py | 14 ++++++++--- modelscope/utils/hf_util.py | 46 +++++++++++++++++++++++++++++++++---- 2 files changed, 52 insertions(+), 8 deletions(-) diff --git a/modelscope/__init__.py b/modelscope/__init__.py index d60a8c79..13e6f6a3 100644 --- a/modelscope/__init__.py +++ b/modelscope/__init__.py @@ -36,9 +36,12 @@ if TYPE_CHECKING: from .utils.hf_util import ( AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, - AutoModelForTokenClassification, AutoModelForImageSegmentation, - AutoTokenizer, GenerationConfig, AutoImageProcessor, BatchFeature, - T5EncoderModel) + AutoModelForTokenClassification, AutoModelForImageClassification, + AutoModelForImageTextToText, AutoModelForImageToImage, + AutoModelForImageSegmentation, AutoModelForQuestionAnswering, + AutoModelForMaskedLM, AutoTokenizer, AutoModelForMaskGeneration, + AutoModelForPreTraining, AutoModelForTextEncoding, + GenerationConfig, AutoImageProcessor, BatchFeature, T5EncoderModel) else: print( 'transformer is not installed, please install it if you want to use related modules' ) @@ -96,6 +99,11 @@ else: 'AwqConfig', 'BitsAndBytesConfig', 'AutoModelForCausalLM', 'AutoModelForSeq2SeqLM', 'AutoTokenizer', 'AutoModelForSequenceClassification', + 'AutoModelForImageClassification', + 'AutoModelForImageTextToText', 'AutoModelForImageToImage', + 'AutoModelForQuestionAnswering', 'AutoModelForMaskedLM', + 'AutoModelForMaskGeneration', 'AutoModelForPreTraining', + 'AutoModelForTextEncoding', 'AutoModelForTokenClassification', 'AutoModelForImageSegmentation', 'AutoImageProcessor', 'BatchFeature', 'T5EncoderModel' ] diff --git a/modelscope/utils/hf_util.py b/modelscope/utils/hf_util.py index 9d517724..57b0d3fe 100644 --- a/modelscope/utils/hf_util.py +++ 
b/modelscope/utils/hf_util.py @@ -9,11 +9,23 @@ from transformers import AutoFeatureExtractor as AutoFeatureExtractorHF from transformers import AutoImageProcessor as AutoImageProcessorHF from transformers import AutoModel as AutoModelHF from transformers import AutoModelForCausalLM as AutoModelForCausalLMHF +from transformers import \ + AutoModelForImageClassification as AutoModelForImageClassificationHF from transformers import \ AutoModelForImageSegmentation as AutoModelForImageSegmentationHF +from transformers import \ + AutoModelForImageTextToText as AutoModelForImageTextToTextHF +from transformers import AutoModelForImageToImage as AutoModelForImageToImageHF +from transformers import AutoModelForMaskedLM as AutoModelForMaskedLMHF +from transformers import \ + AutoModelForMaskGeneration as AutoModelForMaskGenerationHF +from transformers import AutoModelForPreTraining as AutoModelForPreTrainingHF +from transformers import \ + AutoModelForQuestionAnswering as AutoModelForQuestionAnsweringHF from transformers import AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLMHF from transformers import \ AutoModelForSequenceClassification as AutoModelForSequenceClassificationHF +from transformers import AutoModelForTextEncoding as AutoModelForTextEncodingHF from transformers import \ AutoModelForTokenClassification as AutoModelForTokenClassificationHF from transformers import AutoProcessor as AutoProcessorHF @@ -315,25 +327,49 @@ AutoModelForTokenClassification = get_wrapped_class( AutoModelForTokenClassificationHF) AutoModelForImageSegmentation = get_wrapped_class( AutoModelForImageSegmentationHF) +AutoModelForImageClassification = get_wrapped_class( + AutoModelForImageClassificationHF) +AutoModelForImageTextToText = get_wrapped_class(AutoModelForImageTextToTextHF) +AutoModelForImageToImage = get_wrapped_class(AutoModelForImageToImageHF) +AutoModelForQuestionAnswering = get_wrapped_class( + AutoModelForQuestionAnsweringHF) +AutoModelForMaskedLM = 
get_wrapped_class(AutoModelForMaskedLMHF) +AutoModelForMaskGeneration = get_wrapped_class(AutoModelForMaskGenerationHF) +AutoModelForPreTraining = get_wrapped_class(AutoModelForPreTrainingHF) +AutoModelForTextEncoding = get_wrapped_class(AutoModelForTextEncodingHF) T5EncoderModel = get_wrapped_class(T5EncoderModelHF) AutoTokenizer = get_wrapped_class( AutoTokenizerHF, ignore_file_pattern=[ - r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt' + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) +AutoProcessor = get_wrapped_class( + AutoProcessorHF, + ignore_file_pattern=[ + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) AutoConfig = get_wrapped_class( AutoConfigHF, ignore_file_pattern=[ - r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt' + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' ]) GenerationConfig = get_wrapped_class( GenerationConfigHF, ignore_file_pattern=[ - r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt' + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) +BitsAndBytesConfig = get_wrapped_class( + BitsAndBytesConfigHF, + ignore_file_pattern=[ + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) +AutoImageProcessor = get_wrapped_class( + AutoImageProcessorHF, + ignore_file_pattern=[ + r'\w+\.bin', r'\w+\.safetensors', r'\w+\.pth', r'\w+\.pt', r'\w+\.h5' + ]) + GPTQConfig = GPTQConfigHF AwqConfig = AwqConfigHF -BitsAndBytesConfig = BitsAndBytesConfigHF -AutoImageProcessor = get_wrapped_class(AutoImageProcessorHF) BatchFeature = get_wrapped_class(BatchFeatureHF)