# Copyright (c) Alibaba, Inc. and its affiliates.
import unittest

import PIL.Image

from modelscope.pipelines import pipeline
from modelscope.pipelines.base import Pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.test_utils import test_level


class OCRRecognitionTest(unittest.TestCase):
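    """Smoke tests for the OCR recognition pipelines on ModelScope hub models."""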

    def setUp(self) -> None:
        self.model_id = 'damo/cv_convnextTiny_ocr-recognition-general_damo'
        self.test_image = 'data/test/images/ocr_recognition.jpg'
        self.task = Tasks.ocr_recognition

    def pipeline_inference(self, pipeline: Pipeline, input_location: str):
        result = pipeline(input_location)
        print('ocr recognition results: ', result)
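
    # The pipeline call also accepts a list of inputs together with a
    # batch_size keyword, so several images can be recognised in one batch.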

    def pipeline_inference_batch(self, pipeline: Pipeline,
                                 input_location: str):
        result = pipeline(
            [input_location, input_location, input_location, input_location],
            batch_size=4)
        print('ocr recognition results: ', result)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_batch(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model=self.model_id,
            model_revision='v2.3.0')
        self.pipeline_inference_batch(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model=self.model_id,
            model_revision='v2.3.0')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_handwritten(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-handwritten_damo',
            model_revision='v2.3.0')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_scene(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-scene_damo',
            model_revision='v2.3.0')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_document(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-document_damo',
            model_revision='v2.3.0')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_licenseplate(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-licenseplate_damo',
            model_revision='v2.3.0')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_crnn(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_crnn_ocr-recognition-general_damo',
            model_revision='v2.2.2')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_lightweightedge(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_LightweightEdge_ocr-recognitoin-general_damo',
            model_revision='v2.4.1')
        self.pipeline_inference(ocr_recognition, self.test_image)
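
    # Besides file paths, the pipeline accepts an already-loaded PIL.Image
    # object as input, which the test below exercises.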

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_from_modelhub_PILinput(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model=self.model_id,
            model_revision='v2.3.0')
        imagePIL = PIL.Image.open(self.test_image)
        self.pipeline_inference(ocr_recognition, imagePIL)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_modelhub_default_model(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition, model_revision='v2.3.0')
        self.pipeline_inference(ocr_recognition, self.test_image)
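
    # The tests below repeat the same model variants, forcing CPU inference by
    # passing device='cpu' to the pipeline constructor.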

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model=self.model_id,
            model_revision='v2.3.0',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_handwritten_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-handwritten_damo',
            model_revision='v2.3.0',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_scene_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-scene_damo',
            model_revision='v2.3.0',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_document_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-document_damo',
            model_revision='v2.3.0',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_licenseplate_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_convnextTiny_ocr-recognition-licenseplate_damo',
            model_revision='v2.3.0',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_crnn_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_crnn_ocr-recognition-general_damo',
            model_revision='v2.2.2',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_with_model_from_modelhub_lightweightedge_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model='damo/cv_LightweightEdge_ocr-recognitoin-general_damo',
            model_revision='v2.4.1',
            device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)

    @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
    def test_run_with_model_from_modelhub_PILinput_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition,
            model=self.model_id,
            model_revision='v2.3.0',
            device='cpu')
        imagePIL = PIL.Image.open(self.test_image)
        self.pipeline_inference(ocr_recognition, imagePIL)

    @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')
    def test_run_modelhub_default_model_cpu(self):
        ocr_recognition = pipeline(
            Tasks.ocr_recognition, model_revision='v2.3.0', device='cpu')
        self.pipeline_inference(ocr_recognition, self.test_image)


if __name__ == '__main__':
    unittest.main()