diff --git a/data/test/images/face_liveness_xc.png b/data/test/images/face_liveness_xc.png new file mode 100644 index 00000000..54777f9e --- /dev/null +++ b/data/test/images/face_liveness_xc.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0abad2347748bf312ab0dbce48fdc643a703d94970e1b181cf19b9be6312db8c +size 3145728 diff --git a/modelscope/metainfo.py b/modelscope/metainfo.py index 21cd7cb6..3fca7856 100644 --- a/modelscope/metainfo.py +++ b/modelscope/metainfo.py @@ -236,6 +236,7 @@ class Pipelines(object): face_detection = 'resnet-face-detection-scrfd10gkps' face_liveness_ir = 'manual-face-liveness-flir' face_liveness_rgb = 'manual-face-liveness-flir' + face_liveness_xc = 'manual-face-liveness-flxc' card_detection = 'resnet-card-detection-scrfd34gkps' ulfd_face_detection = 'manual-face-detection-ulfd' tinymog_face_detection = 'manual-face-detection-tinymog' diff --git a/modelscope/pipelines/cv/face_liveness_xc_pipeline.py b/modelscope/pipelines/cv/face_liveness_xc_pipeline.py new file mode 100644 index 00000000..dbe19be1 --- /dev/null +++ b/modelscope/pipelines/cv/face_liveness_xc_pipeline.py @@ -0,0 +1,87 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import os.path as osp +from typing import Any, Dict + +import cv2 +import numpy as np +import onnxruntime +import PIL +import torch +import torch.nn.functional as F + +from modelscope.metainfo import Pipelines +from modelscope.models.cv.face_recognition.align_face import align_face +from modelscope.models.cv.facial_landmark_confidence import \ + FacialLandmarkConfidence +from modelscope.outputs import OutputKeys +from modelscope.pipelines import pipeline +from modelscope.pipelines.base import Input, Pipeline +from modelscope.pipelines.builder import PIPELINES +from modelscope.preprocessors import LoadImage +from modelscope.utils.constant import ModelFile, Tasks +from modelscope.utils.logger import get_logger +from . 
import FaceProcessingBasePipeline

logger = get_logger()


@PIPELINES.register_module(
    Tasks.face_liveness, module_name=Pipelines.face_liveness_xc)
class FaceLivenessXcPipeline(FaceProcessingBasePipeline):

    def __init__(self, model: str, **kwargs):
        """
        FaceLivenessXcPipeline can judge whether the input face is a real or a fake face.
        Use `model` to create a face liveness xc pipeline for prediction.
        Args:
            model: model id on modelscope hub.
        ```python
        >>> from modelscope.pipelines import pipeline
        >>> fl_xc = pipeline('face_liveness', 'damo/cv_manual_face-liveness_flxc')
        >>> fl_xc("https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/face_liveness_xc.png")
        {'scores': [0.03821974992752075], 'boxes': [[12.569677352905273, 6.428711891174316,
        94.17887115478516, 106.74441528320312]]}
        ```
        """
        super().__init__(model=model, **kwargs)
        onnx_path = osp.join(model, ModelFile.ONNX_MODEL_FILE)
        logger.info(f'loading model from {onnx_path}')
        self.sess, self.input_node_name, self.out_node_name = self.load_onnx_model(
            onnx_path)
        logger.info('load model done')

    def load_onnx_model(self, onnx_path):
        sess = onnxruntime.InferenceSession(onnx_path)
        out_node_name = []
        input_node_name = []
        for node in sess.get_outputs():
            out_node_name.append(node.name)

        for node in sess.get_inputs():
            input_node_name.append(node.name)

        return sess, input_node_name, out_node_name

    def preprocess(self, input: Input) -> Dict[str, Any]:
        result = super().preprocess(input)
        img = result['img']
        img = (img - 127.5) * 0.0078125
        img = np.expand_dims(img, 0).copy()
        input_tensor = np.concatenate([img, img, img, img], axis=3)
        input_tensor = np.transpose(
            input_tensor, axes=(0, 3, 1, 2)).astype(np.float32)
        result['input_tensor'] = input_tensor
        return result

    def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:
        input_feed = {}
        input_feed[
            self.input_node_name[0]] = input['input_tensor'].cpu().numpy()
        result = 
self.sess.run(self.out_node_name, input_feed=input_feed)
        scores = [result[0][0][0].tolist()]

        boxes = input['bbox'].cpu().numpy()[np.newaxis, :].tolist()
        return {OutputKeys.SCORES: scores, OutputKeys.BOXES: boxes}

    def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        return inputs diff --git a/tests/pipelines/test_face_liveness_xc.py b/tests/pipelines/test_face_liveness_xc.py new file mode 100644 index 00000000..91b46e01 --- /dev/null +++ b/tests/pipelines/test_face_liveness_xc.py @@ -0,0 +1,38 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import os.path as osp +import unittest + +import cv2 + +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from modelscope.utils.cv.image_utils import draw_face_detection_no_lm_result +from modelscope.utils.test_utils import test_level + + +class FaceLivenessXcTest(unittest.TestCase):

    def setUp(self) -> None:
        self.model_id = 'damo/cv_manual_face-liveness_flxc'
        self.img_path = 'data/test/images/face_liveness_xc.png'

    def show_result(self, img_path, detection_result):
        img = draw_face_detection_no_lm_result(img_path, detection_result)
        cv2.imwrite('result.png', img)
        print(f'output written to {osp.abspath("result.png")}')

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_modelhub(self):
        face_detection = pipeline(Tasks.face_liveness, model=self.model_id)
        result = face_detection(self.img_path)
        self.show_result(self.img_path, result)

    @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
    def test_run_default_model(self):
        face_detection = pipeline(Tasks.face_liveness)
        result = face_detection(self.img_path)
        self.show_result(self.img_path, result)


if __name__ == '__main__':
    unittest.main()