# Copyright (c) Alibaba, Inc. and its affiliates.
import unittest

import torch

from modelscope.models import Model
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.test_utils import test_level
|
|
|
class MultiModalEmbeddingTest(unittest.TestCase):
    """Smoke tests for the multi-modal (CLIP) text-embedding pipeline.

    Each test builds the pipeline a different way (by model id, from a
    preloaded ``Model``, or with the task's default model), embeds the same
    sample text, and reports the embedding's L1/L2 norms.
    """

    # Sample query shared (read-only) by all test methods.
    test_input = {'text': '皮卡丘'}

    def setUp(self) -> None:
        # Task and model identifiers used by the individual tests.
        self.task = Tasks.multi_modal_embedding
        self.model_id = 'damo/multi-modal_clip-vit-base-patch16_zh'

    def _report_norms(self, text_embedding) -> None:
        """Print the L1 and L2 norms of *text_embedding*.

        The model is expected to emit L2-normalized embeddings, so the
        printed L2 norm should be ~1.0.
        """
        print('l1-norm: {}'.format(
            torch.norm(text_embedding, p=1, dim=-1).item()))
        print('l2-norm: {}'.format(torch.norm(text_embedding,
                                              dim=-1).item()))  # should be 1.0

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run(self):
        # Build the pipeline directly from the model id string.
        pipeline_multi_modal_embedding = pipeline(
            Tasks.multi_modal_embedding, model=self.model_id)
        text_embedding = pipeline_multi_modal_embedding.forward(
            self.test_input)[OutputKeys.TEXT_EMBEDDING]
        self._report_norms(text_embedding)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_from_modelhub(self):
        # Load the model object first, then hand it to the pipeline.
        model = Model.from_pretrained(self.model_id)
        pipeline_multi_modal_embedding = pipeline(
            task=Tasks.multi_modal_embedding, model=model)
        text_embedding = pipeline_multi_modal_embedding.forward(
            self.test_input)[OutputKeys.TEXT_EMBEDDING]
        self._report_norms(text_embedding)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_default_model(self):
        # No model given: the pipeline resolves the task's default model.
        pipeline_multi_modal_embedding = pipeline(
            task=Tasks.multi_modal_embedding)
        text_embedding = pipeline_multi_modal_embedding.forward(
            self.test_input)[OutputKeys.TEXT_EMBEDDING]
        self._report_norms(text_embedding)
|
2022-06-27 11:57:22 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# Allow running this test module directly: `python <this_file>.py`.
if __name__ == '__main__':
    unittest.main()
|