Files
modelscope/tests/pipelines/test_generative_multi_modal_embedding.py
chaojie.mcj 0e52a20d28 [to #42322933]update license
以下算法进行了header变更:
modelscope.models.cv.cmdssl_video_embedding
modelscope.models.cv.action_recognition 
modelscope.models.cv.animal_recognition
modelscope.models.multi_modal.multi_stage_diffusion
modelscope.models.multi_modal.gemm

modelscope.pipelines.cv.live_category_pipeline
modelscope.pipelines.cv.video_category_pipeline
modelscope.models.cv.image_to_image_translation
modelscope.models.cv.image_to_image_generation

modelscope.models.cv.video_inpainting
modelscope.models.multi_modal.diffusion
modelscope.models.multi_modal.team
modelscope.models.cv.shop_segmentation
modelscope.models.cv.text_driven_segmentation
modelscope.models.cv.action_recognition 


modelscope.models.cv.face_emotion
modelscope.models.cv.hand_static
        Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10268474
2022-09-28 14:30:37 +08:00

78 lines
3.3 KiB
Python

# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
import unittest
from modelscope.models import Model
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.demo_utils import DemoCompatibilityCheck
from modelscope.utils.test_utils import test_level
class GEMMMultiModalEmbeddingTest(unittest.TestCase, DemoCompatibilityCheck):
    """Pipeline tests for the GEMM generative multi-modal embedding model.

    Exercises the ``generative_multi_modal_embedding`` task with image-only,
    text-only, combined, and captioning inputs, gated by ``test_level()``.
    """

    # Class-level fixture: the test methods read it via ``self.test_input``,
    # so it must be a class attribute (a bare local in setUp would be lost).
    # ``captioning`` toggles whether the pipeline also generates a caption.
    test_input = {
        'image': 'data/test/images/generative_multimodal.jpg',
        'text':
        'interior design of modern living room with fireplace in a new house',
        'captioning': False
    }

    def setUp(self) -> None:
        # Task and model id consumed by DemoCompatibilityCheck and by the
        # individual test methods below.
        self.task = Tasks.generative_multi_modal_embedding
        self.model_id = 'damo/multi-modal_gemm-vit-large-patch14_generative-multi-modal-embedding'

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run(self):
        # Build the pipeline from the model id and run the combined
        # image + text input.
        generative_multi_modal_embedding_pipeline = pipeline(
            Tasks.generative_multi_modal_embedding, model=self.model_id)
        output = generative_multi_modal_embedding_pipeline(self.test_input)
        print(output)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_default_model(self):
        # No explicit model: the pipeline resolves the task's default model.
        generative_multi_modal_embedding_pipeline = pipeline(
            task=Tasks.generative_multi_modal_embedding)
        output = generative_multi_modal_embedding_pipeline(self.test_input)
        print(output)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_from_modelhub(self):
        # Instantiate the model object first, then hand it to the pipeline.
        model = Model.from_pretrained(self.model_id)
        generative_multi_modal_embedding_pipeline = pipeline(
            task=Tasks.generative_multi_modal_embedding, model=model)
        output = generative_multi_modal_embedding_pipeline(self.test_input)
        print(output)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_output_captioning(self):
        # Image-only input with captioning enabled.
        generative_multi_modal_embedding_pipeline = pipeline(
            task=Tasks.generative_multi_modal_embedding, model=self.model_id)
        test_input = {'image': self.test_input['image'], 'captioning': True}
        output = generative_multi_modal_embedding_pipeline(test_input)
        print(output)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_output_only_image(self):
        # Image-only input, captioning disabled: embedding output only.
        generative_multi_modal_embedding_pipeline = pipeline(
            task=Tasks.generative_multi_modal_embedding, model=self.model_id)
        test_input = {'image': self.test_input['image'], 'captioning': False}
        output = generative_multi_modal_embedding_pipeline(test_input)
        print(output)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_with_output_only_text(self):
        # Text-only input.
        generative_multi_modal_embedding_pipeline = pipeline(
            task=Tasks.generative_multi_modal_embedding, model=self.model_id)
        test_input = {'text': self.test_input['text']}
        output = generative_multi_modal_embedding_pipeline(test_input)
        print(output)

    @unittest.skip('demo compatibility test is only enabled on a needed-basis')
    def test_demo_compatibility(self):
        self.compatibility_check()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()