Mirror of https://github.com/modelscope/modelscope.git
add transformers support for Qwen2-VL (#1106)

* add qwen2vlconfig
* rearrange

Co-authored-by: Yingda Chen <yingda.chen@alibaba-inc.com>
modelscope/__init__.py

@@ -32,16 +32,17 @@ if TYPE_CHECKING:
                                      build_dataset_from_file)
     from .utils.constant import Tasks
 
     if is_transformers_available():
-        from .utils.hf_util import AutoConfig, GPTQConfig, AwqConfig, BitsAndBytesConfig
         from .utils.hf_util import (
-            AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM,
+            AutoModel, AutoProcessor, AutoFeatureExtractor, GenerationConfig,
+            AutoConfig, GPTQConfig, AwqConfig, BitsAndBytesConfig,
+            AutoModelForCausalLM, AutoModelForSeq2SeqLM,
             AutoModelForSequenceClassification,
             AutoModelForTokenClassification, AutoModelForImageClassification,
             AutoModelForImageToImage, AutoModelForImageSegmentation,
             AutoModelForQuestionAnswering, AutoModelForMaskedLM, AutoTokenizer,
             AutoModelForMaskGeneration, AutoModelForPreTraining,
-            AutoModelForTextEncoding, GenerationConfig, AutoImageProcessor,
-            BatchFeature, T5EncoderModel)
+            AutoModelForTextEncoding, AutoImageProcessor, BatchFeature,
+            Qwen2VLForConditionalGeneration, T5EncoderModel)
     else:
         print(
             'transformer is not installed, please install it if you want to use related modules'

@@ -95,17 +96,17 @@ else:
 
     if is_transformers_available():
         _import_structure['utils.hf_util'] = [
-            'AutoConfig', 'GenerationConfig', 'AutoModel', 'GPTQConfig',
-            'AwqConfig', 'BitsAndBytesConfig', 'AutoModelForCausalLM',
-            'AutoModelForSeq2SeqLM', 'AutoTokenizer',
-            'AutoModelForSequenceClassification',
+            'AutoModel', 'AutoProcessor', 'AutoFeatureExtractor',
+            'GenerationConfig', 'AutoConfig', 'GPTQConfig', 'AwqConfig',
+            'BitsAndBytesConfig', 'AutoModelForCausalLM',
+            'AutoModelForSeq2SeqLM', 'AutoModelForSequenceClassification',
+            'AutoModelForTokenClassification',
             'AutoModelForImageClassification', 'AutoModelForImageToImage',
-            'AutoModelForQuestionAnswering', 'AutoModelForMaskedLM',
+            'AutoModelForImageSegmentation', 'AutoModelForQuestionAnswering',
+            'AutoModelForMaskedLM', 'AutoTokenizer',
             'AutoModelForMaskGeneration', 'AutoModelForPreTraining',
-            'AutoModelForTextEncoding', 'AutoModelForTokenClassification',
-            'AutoModelForImageSegmentation', 'AutoImageProcessor',
-            'BatchFeature', 'T5EncoderModel'
+            'AutoModelForTextEncoding', 'AutoImageProcessor', 'BatchFeature',
+            'Qwen2VLForConditionalGeneration', 'T5EncoderModel'
         ]
 
     import sys
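The two hunks above both live in modelscope/__init__.py. The first only touches the TYPE_CHECKING branch, which static analyzers and IDEs see; the second adds 'Qwen2VLForConditionalGeneration' to the _import_structure dict that drives the package's lazy loading at runtime, so `import modelscope` stays cheap and the heavy transformers import is deferred until one of these names is actually accessed. The sketch below illustrates that general lazy-module pattern; the _LazyModule class and the collections demo are purely illustrative and are not modelscope's actual loader, which this diff does not show.

import importlib
import types


class _LazyModule(types.ModuleType):
    """Illustrative sketch only, not modelscope's real loader."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [exported names]} into {name: submodule}.
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        try:
            submodule = self._name_to_module[attr]
        except KeyError:
            raise AttributeError(
                f'module {self.__name__!r} has no attribute {attr!r}') from None
        # The real import happens only now, on first attribute access.
        module = importlib.import_module(f'{self.__name__}.{submodule}')
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Self-contained demo against the standard library: collections.abc is
# imported only when Mapping is first touched.
lazy_collections = _LazyModule('collections', {'abc': ['Mapping']})
print(lazy_collections.Mapping)

Keeping the TYPE_CHECKING import list and the _import_structure list in sync is what both hunks are doing.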
modelscope/utils/hf_util.py

@@ -33,6 +33,8 @@ from transformers import BitsAndBytesConfig as BitsAndBytesConfigHF
 from transformers import GenerationConfig as GenerationConfigHF
 from transformers import (PretrainedConfig, PreTrainedModel,
                           PreTrainedTokenizerBase)
+from transformers import \
+    Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGenerationHF
 from transformers import T5EncoderModel as T5EncoderModelHF
 
 from modelscope import snapshot_download
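A practical note on the new module-level import: Qwen2VLForConditionalGeneration only exists in fairly recent transformers releases (Qwen2-VL support landed around transformers 4.45), so the line above assumes an up-to-date installation. Purely as an illustration of an alternative, and not what this commit does, a guarded variant would look like:

# Sketch of a defensive variant (not the committed code): tolerate older
# transformers releases that do not ship Qwen2-VL support yet.
try:
    from transformers import \
        Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGenerationHF
except ImportError:
    Qwen2VLForConditionalGenerationHF = None  # wrapped export would then be skipped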
@@ -335,6 +337,8 @@ AutoModelForMaskGeneration = get_wrapped_class(AutoModelForMaskGenerationHF)
 AutoModelForPreTraining = get_wrapped_class(AutoModelForPreTrainingHF)
 AutoModelForTextEncoding = get_wrapped_class(AutoModelForTextEncodingHF)
 T5EncoderModel = get_wrapped_class(T5EncoderModelHF)
+Qwen2VLForConditionalGeneration = get_wrapped_class(
+    Qwen2VLForConditionalGenerationHF)
 
 AutoTokenizer = get_wrapped_class(
     AutoTokenizerHF,
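The new export goes through the same get_wrapped_class helper as the other transformers classes in this file. The helper's implementation is not part of this diff; judging from the snapshot_download import in the context above, its role is to resolve a ModelScope model id to a locally downloaded snapshot before delegating to the original Hugging Face from_pretrained. A rough, self-contained sketch of that idea, not modelscope's actual code:

import os

from transformers import \
    Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGenerationHF

from modelscope import snapshot_download


def get_wrapped_class(hf_class):
    """Rough sketch of the wrapping idea, NOT modelscope's real helper."""

    class Wrapped(hf_class):

        @classmethod
        def from_pretrained(cls, pretrained_model_name_or_path, *args,
                            **kwargs):
            # If the id is not already a local path, fetch a snapshot from
            # the ModelScope hub, then hand the local directory to the
            # original Hugging Face implementation.
            if not os.path.exists(pretrained_model_name_or_path):
                pretrained_model_name_or_path = snapshot_download(
                    pretrained_model_name_or_path)
            return super().from_pretrained(pretrained_model_name_or_path,
                                           *args, **kwargs)

    Wrapped.__name__ = hf_class.__name__
    return Wrapped


Qwen2VLForConditionalGeneration = get_wrapped_class(
    Qwen2VLForConditionalGenerationHF)

From the user's side, the intended effect is that `from modelscope import Qwen2VLForConditionalGeneration` followed by `from_pretrained('Qwen/Qwen2-VL-7B-Instruct')` (model id shown only as an example) pulls the checkpoint through the ModelScope hub instead of requiring a manual download.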