add transformers support for Qwen2VL (#1106)

* add Qwen2VLConfig

* rearrange

---------

Co-authored-by: Yingda Chen <yingda.chen@alibaba-inc.com>
This commit is contained in:
Yingda Chen
2024-11-28 20:08:14 +08:00
committed by GitHub
parent e2bd302175
commit 3e13cc899b
2 changed files with 17 additions and 12 deletions

View File

@@ -32,16 +32,17 @@ if TYPE_CHECKING:
build_dataset_from_file)
from .utils.constant import Tasks
if is_transformers_available():
from .utils.hf_util import AutoConfig, GPTQConfig, AwqConfig, BitsAndBytesConfig
from .utils.hf_util import (
AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM,
AutoModel, AutoProcessor, AutoFeatureExtractor, GenerationConfig,
AutoConfig, GPTQConfig, AwqConfig, BitsAndBytesConfig,
AutoModelForCausalLM, AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification, AutoModelForImageClassification,
AutoModelForImageToImage, AutoModelForImageSegmentation,
AutoModelForQuestionAnswering, AutoModelForMaskedLM, AutoTokenizer,
AutoModelForMaskGeneration, AutoModelForPreTraining,
AutoModelForTextEncoding, GenerationConfig, AutoImageProcessor,
BatchFeature, T5EncoderModel)
AutoModelForTextEncoding, AutoImageProcessor, BatchFeature,
Qwen2VLForConditionalGeneration, T5EncoderModel)
else:
print(
'transformer is not installed, please install it if you want to use related modules'
@@ -95,17 +96,17 @@ else:
if is_transformers_available():
_import_structure['utils.hf_util'] = [
'AutoConfig', 'GenerationConfig', 'AutoModel', 'GPTQConfig',
'AwqConfig', 'BitsAndBytesConfig', 'AutoModelForCausalLM',
'AutoModelForSeq2SeqLM', 'AutoTokenizer',
'AutoModelForSequenceClassification',
'AutoModel', 'AutoProcessor', 'AutoFeatureExtractor',
'GenerationConfig', 'AutoConfig', 'GPTQConfig', 'AwqConfig',
'BitsAndBytesConfig', 'AutoModelForCausalLM',
'AutoModelForSeq2SeqLM', 'AutoModelForSequenceClassification',
'AutoModelForTokenClassification',
'AutoModelForImageClassification', 'AutoModelForImageToImage',
'AutoModelForQuestionAnswering', 'AutoModelForMaskedLM',
'AutoModelForImageSegmentation', 'AutoModelForQuestionAnswering',
'AutoModelForMaskedLM', 'AutoTokenizer',
'AutoModelForMaskGeneration', 'AutoModelForPreTraining',
'AutoModelForTextEncoding', 'AutoModelForTokenClassification',
'AutoModelForImageSegmentation', 'AutoImageProcessor',
'BatchFeature', 'T5EncoderModel'
'AutoModelForTextEncoding', 'AutoImageProcessor', 'BatchFeature',
'Qwen2VLForConditionalGeneration', 'T5EncoderModel'
]
import sys

View File

@@ -33,6 +33,8 @@ from transformers import BitsAndBytesConfig as BitsAndBytesConfigHF
from transformers import GenerationConfig as GenerationConfigHF
from transformers import (PretrainedConfig, PreTrainedModel,
PreTrainedTokenizerBase)
from transformers import \
Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGenerationHF
from transformers import T5EncoderModel as T5EncoderModelHF
from modelscope import snapshot_download
@@ -335,6 +337,8 @@ AutoModelForMaskGeneration = get_wrapped_class(AutoModelForMaskGenerationHF)
AutoModelForPreTraining = get_wrapped_class(AutoModelForPreTrainingHF)
AutoModelForTextEncoding = get_wrapped_class(AutoModelForTextEncodingHF)
T5EncoderModel = get_wrapped_class(T5EncoderModelHF)
Qwen2VLForConditionalGeneration = get_wrapped_class(
Qwen2VLForConditionalGenerationHF)
AutoTokenizer = get_wrapped_class(
AutoTokenizerHF,