diff --git a/modelscope/__init__.py b/modelscope/__init__.py
index 80cd861a..b7712b3b 100644
--- a/modelscope/__init__.py
+++ b/modelscope/__init__.py
@@ -32,16 +32,17 @@ if TYPE_CHECKING:
                            build_dataset_from_file)
     from .utils.constant import Tasks
     if is_transformers_available():
-        from .utils.hf_util import AutoConfig, GPTQConfig, AwqConfig, BitsAndBytesConfig
         from .utils.hf_util import (
-            AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM,
+            AutoModel, AutoProcessor, AutoFeatureExtractor, GenerationConfig,
+            AutoConfig, GPTQConfig, AwqConfig, BitsAndBytesConfig,
+            AutoModelForCausalLM, AutoModelForSeq2SeqLM,
             AutoModelForSequenceClassification, AutoModelForTokenClassification,
             AutoModelForImageClassification, AutoModelForImageToImage,
             AutoModelForImageSegmentation, AutoModelForQuestionAnswering,
             AutoModelForMaskedLM, AutoTokenizer,
             AutoModelForMaskGeneration, AutoModelForPreTraining,
-            AutoModelForTextEncoding, GenerationConfig, AutoImageProcessor,
-            BatchFeature, T5EncoderModel)
+            AutoModelForTextEncoding, AutoImageProcessor, BatchFeature,
+            Qwen2VLForConditionalGeneration, T5EncoderModel)
     else:
         print(
             'transformer is not installed, please install it if you want to use related modules'
@@ -95,17 +96,17 @@ else:
 
     if is_transformers_available():
         _import_structure['utils.hf_util'] = [
-            'AutoConfig', 'GenerationConfig', 'AutoModel', 'GPTQConfig',
-            'AwqConfig', 'BitsAndBytesConfig', 'AutoModelForCausalLM',
-            'AutoModelForSeq2SeqLM', 'AutoTokenizer',
-            'AutoModelForSequenceClassification',
+            'AutoModel', 'AutoProcessor', 'AutoFeatureExtractor',
+            'GenerationConfig', 'AutoConfig', 'GPTQConfig', 'AwqConfig',
+            'BitsAndBytesConfig', 'AutoModelForCausalLM',
+            'AutoModelForSeq2SeqLM', 'AutoModelForSequenceClassification',
             'AutoModelForTokenClassification',
             'AutoModelForImageClassification', 'AutoModelForImageToImage',
-            'AutoModelForQuestionAnswering', 'AutoModelForMaskedLM',
+            'AutoModelForImageSegmentation', 'AutoModelForQuestionAnswering',
+            'AutoModelForMaskedLM', 'AutoTokenizer',
             'AutoModelForMaskGeneration', 'AutoModelForPreTraining',
-            'AutoModelForTextEncoding', 'AutoModelForTokenClassification',
-            'AutoModelForImageSegmentation', 'AutoImageProcessor',
-            'BatchFeature', 'T5EncoderModel'
+            'AutoModelForTextEncoding', 'AutoImageProcessor', 'BatchFeature',
+            'Qwen2VLForConditionalGeneration', 'T5EncoderModel'
         ]
 
     import sys
diff --git a/modelscope/utils/hf_util.py b/modelscope/utils/hf_util.py
index a67b7886..f6613f98 100644
--- a/modelscope/utils/hf_util.py
+++ b/modelscope/utils/hf_util.py
@@ -33,6 +33,8 @@ from transformers import BitsAndBytesConfig as BitsAndBytesConfigHF
 from transformers import GenerationConfig as GenerationConfigHF
 from transformers import (PretrainedConfig, PreTrainedModel,
                           PreTrainedTokenizerBase)
+from transformers import \
+    Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGenerationHF
 from transformers import T5EncoderModel as T5EncoderModelHF
 
 from modelscope import snapshot_download
@@ -335,6 +337,8 @@ AutoModelForMaskGeneration = get_wrapped_class(AutoModelForMaskGenerationHF)
 AutoModelForPreTraining = get_wrapped_class(AutoModelForPreTrainingHF)
 AutoModelForTextEncoding = get_wrapped_class(AutoModelForTextEncodingHF)
 T5EncoderModel = get_wrapped_class(T5EncoderModelHF)
+Qwen2VLForConditionalGeneration = get_wrapped_class(
+    Qwen2VLForConditionalGenerationHF)
 AutoTokenizer = get_wrapped_class(
     AutoTokenizerHF,
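
A minimal usage sketch of the classes this patch exposes, not part of the diff itself. It assumes the patch is installed and that the local transformers build ships Qwen2VLForConditionalGeneration; the model ID is illustrative. Based on the wrapped imports above, the get_wrapped_class() wrappers appear to route model IDs through ModelScope's snapshot_download before delegating to the upstream from_pretrained.

# Usage sketch only; assumes this change plus a transformers version that
# provides Qwen2VLForConditionalGeneration.
from modelscope import AutoProcessor, Qwen2VLForConditionalGeneration

# Illustrative model ID; any ModelScope-hosted Qwen2-VL repo should work.
model_id = 'Qwen/Qwen2-VL-7B-Instruct'

# The wrapper first fetches the repo via ModelScope's snapshot_download,
# then hands the local path to the original transformers from_pretrained.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype='auto', device_map='auto')
processor = AutoProcessor.from_pretrained(model_id)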