diff --git a/modelscope/models/multi_modal/ofa_for_all_tasks.py b/modelscope/models/multi_modal/ofa_for_all_tasks.py
index 7ca01d7f..41ca1f0b 100644
--- a/modelscope/models/multi_modal/ofa_for_all_tasks.py
+++ b/modelscope/models/multi_modal/ofa_for_all_tasks.py
@@ -187,13 +187,14 @@ class OfaForAllTasks(TorchModel):
         valid_size = len(val_ans)
         valid_tgt_items = [
             torch.cat([
-                torch.tensor(decoder_prompt[1:]), valid_answer,
+                torch.tensor(decoder_prompt[1:]).to('cpu'), valid_answer,
                 self.eos_item
             ]) for decoder_prompt in input['decoder_prompts']
             for valid_answer in val_ans
         ]
         valid_prev_items = [
-            torch.cat([torch.tensor(decoder_prompt), valid_answer])
+            torch.cat(
+                [torch.tensor(decoder_prompt).to('cpu'), valid_answer])
             for decoder_prompt in input['decoder_prompts']
             for valid_answer in val_ans
         ]
diff --git a/tests/pipelines/test_ofa_tasks.py b/tests/pipelines/test_ofa_tasks.py
index 4bdb394a..d89e5d48 100644
--- a/tests/pipelines/test_ofa_tasks.py
+++ b/tests/pipelines/test_ofa_tasks.py
@@ -37,19 +37,6 @@ class OfaTasksTest(unittest.TestCase, DemoCompatibilityCheck):
         result = img_captioning({'image': image})
         print(result[OutputKeys.CAPTION])
 
-    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
-    def test_run_with_image_captioning_zh_with_model(self):
-        model = Model.from_pretrained(
-            '/apsarapangu/disk2/yichang.zyc/ckpt/MaaS/ofa_image-caption_coco_base_zh'
-        )
-        img_captioning = pipeline(
-            task=Tasks.image_captioning,
-            model=model,
-        )
-        image = 'data/test/images/image_captioning.png'
-        result = img_captioning({'image': image})
-        print(result[OutputKeys.CAPTION])
-
     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_image_captioning_with_name(self):
         img_captioning = pipeline(
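For context on the failure mode the first hunk addresses: `torch.cat` raises a `RuntimeError` when its inputs live on different devices. `valid_answer` and `self.eos_item` are built on CPU, while `decoder_prompt` may arrive as a CUDA tensor, in which case `torch.tensor(decoder_prompt)` inherits that device. Below is a minimal sketch of the mismatch and of the fix applied by the patch; the tensor names and values are illustrative, not taken from the source.

```python
import torch

# Stand-ins for the tensors in OfaForAllTasks (values are made up).
eos_item = torch.tensor([2])           # CPU, like self.eos_item
valid_answer = torch.tensor([10, 11])  # CPU, like entries of val_ans

if torch.cuda.is_available():
    # A prompt coming back from a CUDA model keeps its device
    # when re-wrapped with torch.tensor(...).
    decoder_prompt = torch.tensor([0, 5, 7], device='cuda')
    try:
        torch.cat([decoder_prompt, valid_answer, eos_item])
    except RuntimeError as err:
        # "Expected all tensors to be on the same device, ..."
        print(err)

    # The patch's fix: move the prompt to CPU before concatenating.
    merged = torch.cat([decoder_prompt.to('cpu'), valid_answer, eos_item])
    print(merged)  # tensor([ 0,  5,  7, 10, 11,  2])
```

Calling `.to('cpu')` is a no-op for tensors already on CPU, so the change is safe for CPU-only inference as well.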