update license and revert support camouflaged-detection from latest master

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/11892922

    * update license and revert support camouflaged-detection from latest master
This commit is contained in:
wendi.hwd
2023-03-07 11:41:10 +08:00
committed by wenmeng.zwm
parent 04f63ec71f
commit 99fa2fe909
9 changed files with 304 additions and 12 deletions

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c713215f7fb4da5382c9137347ee52956a7a44d5979c4cffd3c9b6d1d7e878f
size 19445

View File

@@ -1,3 +1,4 @@
# The implementation is adopted from U-2-Net, made publicly available under the Apache 2.0 License
# source code available via https://github.com/xuebinqin/U-2-Net
from .senet import SENet
from .u2net import U2NET

View File

@@ -1,6 +1,5 @@
# Implementation in this file is modified based on Res2Net-PretrainedModels
# Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License
# publicly available at https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net_v1b.py
# Implementation in this file is modified based on SINet-V2, made publicly available under the Apache 2.0 License
# publicly available at https://github.com/GewelsJI/SINet-V2
import math
import torch

View File

@@ -1,6 +1,5 @@
# Implementation in this file is modified based on Res2Net-PretrainedModels
# Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License
# publicly available at https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net_v1b.py
# Implementation in this file is modified based on SINet-V2, made publicly available under the Apache 2.0 License
# publicly available at https://github.com/GewelsJI/SINet-V2
from .Res2Net_v1b import res2net50_v1b_26w_4s
__all__ = ['res2net50_v1b_26w_4s']

View File

@@ -0,0 +1,178 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import ConvBNReLU
class AreaLayer(nn.Module):
    """Predict a single-channel area attention map from a pair of features.

    ``xl`` (low-level, ``out_channel`` channels) is projected and resized to
    the spatial size of ``xh`` (high-level, ``in_channel`` channels); the two
    branches are concatenated and reduced to one channel.
    """

    def __init__(self, in_channel, out_channel):
        super(AreaLayer, self).__init__()
        # 1x1 projection of the low-level branch.
        self.lbody = nn.Sequential(
            nn.Conv2d(out_channel, out_channel, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
        )
        # 1x1 projection bringing the high-level branch to out_channel.
        self.hbody = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
        )
        # Two 3x3 conv stages, then a 1x1 conv down to a 1-channel map.
        self.body = nn.Sequential(
            nn.Conv2d(2 * out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel, 1, 1),
        )

    def forward(self, xl, xh):
        low = self.lbody(xl)
        # Match the low-level branch to the high-level spatial resolution.
        low = F.interpolate(
            low, size=xh.size()[2:], mode='bilinear', align_corners=True)
        high = self.hbody(xh)
        fused = torch.cat((low, high), dim=1)
        return self.body(fused)
class EdgeLayer(nn.Module):
    """Predict a single-channel edge attention map from a pair of features.

    Mirrors ``AreaLayer`` but resizes the high-level branch ``xh`` up to the
    spatial size of the low-level branch ``xl``, so the output is at the
    low-level resolution.
    """

    def __init__(self, in_channel, out_channel):
        super(EdgeLayer, self).__init__()
        # 1x1 projection of the low-level branch.
        self.lbody = nn.Sequential(
            nn.Conv2d(out_channel, out_channel, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
        )
        # 1x1 projection bringing the high-level branch to out_channel.
        self.hbody = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
        )
        # Two 3x3 conv stages, then a 1x1 conv down to a 1-channel map.
        self.bodye = nn.Sequential(
            nn.Conv2d(2 * out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel, 1, 1),
        )

    def forward(self, xl, xh):
        low = self.lbody(xl)
        high = self.hbody(xh)
        # Match the high-level branch to the low-level spatial resolution.
        high = F.interpolate(
            high, size=xl.size()[2:], mode='bilinear', align_corners=True)
        fused = torch.cat((low, high), dim=1)
        return self.bodye(fused)
class EBlock(nn.Module):
    """Edge-guided enhancement block.

    Appends the edge attention map as an extra input channel, extracts
    features, then re-weights them with a spatial attention map computed
    from per-pixel channel max/mean statistics.
    """

    def __init__(self, inchs, outchs):
        super(EBlock, self).__init__()
        # +1 input channel for the appended edge-attention map.
        self.elayer = nn.Sequential(
            ConvBNReLU(inchs + 1, outchs, kernel_size=3, padding=1, stride=1),
            ConvBNReLU(outchs, outchs, 1))
        # Spatial attention over the stacked (max, mean) statistics.
        self.salayer = nn.Sequential(
            nn.Conv2d(2, 1, 3, 1, 1, bias=False),
            nn.BatchNorm2d(1, momentum=0.01),
            nn.Sigmoid())

    def forward(self, x, edgeAtten):
        feats = self.elayer(torch.cat((x, edgeAtten), dim=1))
        channel_max = torch.max(feats, 1, keepdim=True)[0]
        channel_mean = torch.mean(feats, dim=1, keepdim=True)
        stats = torch.cat((channel_max, channel_mean), dim=1)
        return feats * self.salayer(stats)
class StructureE(nn.Module):
    """Structure enhancement: group-wise edge-guided feature refinement.

    Splits the input into ``inchs / EM`` channel groups, enhances each group
    with its own ``EBlock`` conditioned on the edge attention map, then
    concatenates the groups and fuses them.
    """

    def __init__(self, inchs, outchs, EM):
        super(StructureE, self).__init__()
        self.ne_modules = int(inchs / EM)
        # Output channels contributed by each group.
        group_out = int(outchs / self.ne_modules)
        self.emlayes = nn.ModuleList(
            EBlock(EM, group_out) for _ in range(self.ne_modules))
        self.body = nn.Sequential(
            ConvBNReLU(outchs, outchs, 3, 1, 1),
            ConvBNReLU(outchs, outchs, 1))

    def forward(self, x, edgeAtten):
        # Resize the attention map to the feature resolution when it differs.
        if edgeAtten.size() != x.size():
            edgeAtten = F.interpolate(
                edgeAtten, x.size()[2:], mode='bilinear', align_corners=False)
        groups = torch.chunk(x, self.ne_modules, dim=1)
        enhanced = [
            block(group, edgeAtten)
            for block, group in zip(self.emlayes, groups)
        ]
        return self.body(torch.cat(enhanced, dim=1))
class ABlock(nn.Module):
    """Area-attention block.

    Splits the input into a foreground-weighted part (``x * areaAtten``) and
    a reverse-attended background part (``x * (1 - areaAtten)``) and fuses
    the two with a 1x1 ConvBNReLU.
    """

    def __init__(self, inchs, outchs, k):
        super(ABlock, self).__init__()
        # NOTE(review): ``alayer`` and ``arlayer`` are constructed but never
        # used in ``forward``. Removing them would change the state_dict keys
        # and break loading of existing checkpoints — confirm before cleanup.
        self.alayer = nn.Sequential(
            ConvBNReLU(inchs, outchs, k, 1, k // 2),
            ConvBNReLU(outchs, outchs, 1))
        self.arlayer = nn.Sequential(
            ConvBNReLU(inchs, outchs, k, 1, k // 2),
            ConvBNReLU(outchs, outchs, 1))
        self.fusion = ConvBNReLU(2 * outchs, outchs, 1)

    def forward(self, x, areaAtten):
        xa = x * areaAtten  # features weighted by the area attention
        xra = x * (1 - areaAtten)  # reverse-attention (background) features
        xout = self.fusion(torch.cat((xa, xra), dim=1))
        return xout
class AMFusion(nn.Module):
    """Area-map guided decoder fusion.

    Upsamples the high-level feature ``xh``, adds the low-level skip ``xl``,
    splits the sum into ``outchs / AM`` channel groups refined by per-group
    ``ABlock``s under the (sigmoid, upsampled) coarse prediction ``xhm``,
    then merges three parallel fusion branches.
    """

    def __init__(self, inchs, outchs, AM):
        super(AMFusion, self).__init__()
        # Kernel size assigned to each ABlock group (supports up to 4 groups).
        self.k = [3, 3, 5, 5]
        self.conv_up = ConvBNReLU(inchs, outchs, 3, 1, 1)
        self.up = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=True)
        self.na_modules = int(outchs / AM)
        alayers = []
        for i in range(self.na_modules):
            layer = ABlock(AM, AM, self.k[i])
            alayers.append(layer)
        self.alayers = nn.ModuleList(alayers)
        self.fusion_0 = ConvBNReLU(outchs, outchs, 3, 1, 1)
        # Factorized 3x3 branch: (3x1) conv followed by (1x3) conv.
        self.fusion_e = nn.Sequential(
            nn.Conv2d(
                outchs, outchs, kernel_size=(3, 1), padding=(1, 0),
                bias=False), nn.BatchNorm2d(outchs), nn.ReLU(inplace=True),
            nn.Conv2d(
                outchs, outchs, kernel_size=(1, 3), padding=(0, 1),
                bias=False), nn.BatchNorm2d(outchs), nn.ReLU(inplace=True))
        # Factorized 5x5 branch: (5x1) conv followed by (1x5) conv.
        self.fusion_e1 = nn.Sequential(
            nn.Conv2d(
                outchs, outchs, kernel_size=(5, 1), padding=(2, 0),
                bias=False), nn.BatchNorm2d(outchs), nn.ReLU(inplace=True),
            nn.Conv2d(
                outchs, outchs, kernel_size=(1, 5), padding=(0, 2),
                bias=False), nn.BatchNorm2d(outchs), nn.ReLU(inplace=True))
        self.fusion = ConvBNReLU(3 * outchs, outchs, 1)

    def forward(self, xl, xh, xhm):
        # Upsample the high-level features and add the low-level skip.
        xh1 = self.up(self.conv_up(xh))
        x = xh1 + xl
        # Sigmoid of the coarse map, upsampled, acts as area attention.
        xm = self.up(torch.sigmoid(xhm))
        xx = torch.chunk(x, self.na_modules, dim=1)
        xxmids = []
        for i in range(self.na_modules):
            xi = self.alayers[i](xx[i], xm)
            xxmids.append(xi)
        xfea = torch.cat(xxmids, dim=1)
        # Three parallel fusion branches over the refined features.
        x0 = self.fusion_0(xfea)
        x1 = self.fusion_e(xfea)
        x2 = self.fusion_e1(xfea)
        x_out = self.fusion(torch.cat((x0, x1, x2), dim=1))
        return x_out

View File

@@ -0,0 +1,74 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from .backbone import res2net50_v1b_26w_4s as res2net
from .modules import AMFusion, AreaLayer, EdgeLayer, StructureE
from .utils import ASPP, CBAM, ConvBNReLU
class SENet(nn.Module):
    """Structure-enhanced detection network on a Res2Net-50 backbone.

    Returns four maps resized to the input resolution: sigmoid predictions
    from the 1/4 and 1/8 decoder stages, the sigmoid of the coarse area
    attention, and the (already sigmoid) edge attention map.
    """

    def __init__(self, backbone_path=None, pretrained=False):
        super(SENet, self).__init__()
        resnet50 = res2net(backbone_path, pretrained)
        # Reuse the Res2Net stem and residual stages as the encoder.
        self.layer0_1 = nn.Sequential(resnet50.conv1, resnet50.bn1,
                                      resnet50.relu)
        self.maxpool = resnet50.maxpool
        self.layer1 = resnet50.layer1
        self.layer2 = resnet50.layer2
        self.layer3 = resnet50.layer3
        self.layer4 = resnet50.layer4
        # Context (ASPP) + attention (CBAM) enhancement of the two deepest
        # stages, both reduced to 256 channels.
        self.aspp3 = ASPP(1024, 256)
        self.aspp4 = ASPP(2048, 256)
        self.cbblock3 = CBAM(inchs=256, kernel_size=5)
        self.cbblock4 = CBAM(inchs=256, kernel_size=5)
        self.up = nn.Upsample(
            mode='bilinear', scale_factor=2, align_corners=False)
        self.conv_up = ConvBNReLU(512, 512, 1)
        # Auxiliary heads that predict edge and area attention maps from
        # layer1 features and the merged deep features.
        self.aux_edge = EdgeLayer(512, 256)
        self.aux_area = AreaLayer(512, 256)
        # Edge-guided enhancement of the shallow encoder stages.
        self.layer1_enhance = StructureE(256, 128, 128)
        self.layer2_enhance = StructureE(512, 256, 128)
        # Area-guided decoder fusion stages.
        self.layer3_decoder = AMFusion(512, 256, 128)
        self.layer2_decoder = AMFusion(256, 128, 128)
        # 1-channel prediction heads at the 1/8 and 1/4 scales.
        self.out_conv_8 = nn.Conv2d(256, 1, 1)
        self.out_conv_4 = nn.Conv2d(128, 1, 1)

    def forward(self, x):
        # Encoder.
        layer0 = self.layer0_1(x)
        layer0s = self.maxpool(layer0)
        layer1 = self.layer1(layer0s)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        # Enhance and merge the two deepest stages into a 512-channel map.
        layer3_eh = self.cbblock3(self.aspp3(layer3))
        layer4_eh = self.cbblock4(self.aspp4(layer4))
        layer34 = self.conv_up(
            torch.cat((self.up(layer4_eh), layer3_eh), dim=1))
        # Auxiliary edge/area attention predictions.
        edge_atten = self.aux_edge(layer1, layer34)
        area_atten = self.aux_area(layer1, layer34)
        edge_atten_ = torch.sigmoid(edge_atten)
        # Edge-guided enhancement of the shallow stages.
        layer1_eh = self.layer1_enhance(layer1, edge_atten_)
        layer2_eh = self.layer2_enhance(layer2, edge_atten_)
        # Area-guided decoding with side outputs at 1/8 and 1/4 scale.
        layer2_fu = self.layer3_decoder(layer2_eh, layer34, area_atten)
        out_8 = self.out_conv_8(layer2_fu)
        layer1_fu = self.layer2_decoder(layer1_eh, layer2_fu, out_8)
        out_4 = self.out_conv_4(layer1_fu)
        # Resize every output to the input resolution.
        out_16 = F.interpolate(
            area_atten,
            size=x.size()[2:],
            mode='bilinear',
            align_corners=False)
        out_8 = F.interpolate(
            out_8, size=x.size()[2:], mode='bilinear', align_corners=False)
        out_4 = F.interpolate(
            out_4, size=x.size()[2:], mode='bilinear', align_corners=False)
        edge_out = F.interpolate(
            edge_atten_,
            size=x.size()[2:],
            mode='bilinear',
            align_corners=False)
        return out_4.sigmoid(), out_8.sigmoid(), out_16.sigmoid(), edge_out

View File

@@ -2,7 +2,6 @@
import os.path as osp
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
@@ -10,8 +9,9 @@ from torchvision import transforms
from modelscope.metainfo import Models
from modelscope.models.base.base_torch_model import TorchModel
from modelscope.models.builder import MODELS
from modelscope.utils.config import Config
from modelscope.utils.constant import ModelFile, Tasks
from .models import U2NET
from .models import U2NET, SENet
@MODELS.register_module(
@@ -22,13 +22,25 @@ class SalientDetection(TorchModel):
"""str -- model file root."""
super().__init__(model_dir, *args, **kwargs)
model_path = osp.join(model_dir, ModelFile.TORCH_MODEL_FILE)
self.model = U2NET(3, 1)
self.norm_mean = [0.485, 0.456, 0.406]
self.norm_std = [0.229, 0.224, 0.225]
self.norm_size = (320, 320)
config_path = osp.join(model_dir, 'config.py')
if osp.exists(config_path) is False:
self.model = U2NET(3, 1)
else:
self.model = SENet(backbone_path=None, pretrained=False)
config = Config.from_file(config_path)
self.norm_mean = config.norm_mean
self.norm_std = config.norm_std
self.norm_size = config.norm_size
checkpoint = torch.load(model_path, map_location='cpu')
self.transform_input = transforms.Compose([
transforms.Resize((320, 320)),
transforms.Resize(self.norm_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms.Normalize(mean=self.norm_mean, std=self.norm_std)
])
self.model.load_state_dict(checkpoint)
self.model.eval()

View File

@@ -12,6 +12,11 @@ from modelscope.utils.constant import Tasks
@PIPELINES.register_module(
Tasks.semantic_segmentation, module_name=Pipelines.salient_detection)
@PIPELINES.register_module(
Tasks.semantic_segmentation,
module_name=Pipelines.salient_boudary_detection)
@PIPELINES.register_module(
Tasks.semantic_segmentation, module_name=Pipelines.camouflaged_detection)
class ImageSalientDetectionPipeline(Pipeline):
def __init__(self, model: str, **kwargs):

View File

@@ -23,6 +23,27 @@ class SalientDetectionTest(unittest.TestCase, DemoCompatibilityCheck):
import cv2
cv2.imwrite(input_location + '_salient.jpg', result[OutputKeys.MASKS])
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
def test_salient_boudary_detection(self):
    # Run the boundary-aware salient-detection model through the generic
    # semantic-segmentation pipeline and write the predicted mask next to
    # the input image.
    # NOTE(review): "boudary" is a typo for "boundary"; kept because test
    # selection may reference the method by name.
    input_location = 'data/test/images/image_salient_detection.jpg'
    model_id = 'damo/cv_res2net_salient-detection'
    salient_detect = pipeline(Tasks.semantic_segmentation, model=model_id)
    result = salient_detect(input_location)
    import cv2
    cv2.imwrite(input_location + '_boudary_salient.jpg',
                result[OutputKeys.MASKS])
@unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
def test_camouflag_detection(self):
    # Run the camouflaged-object detection model through the generic
    # semantic-segmentation pipeline and write the predicted mask next to
    # the input image.
    input_location = 'data/test/images/image_camouflag_detection.jpg'
    model_id = 'damo/cv_res2net_camouflaged-detection'
    camouflag_detect = pipeline(
        Tasks.semantic_segmentation, model=model_id)
    result = camouflag_detect(input_location)
    import cv2
    cv2.imwrite(input_location + '_camouflag.jpg',
                result[OutputKeys.MASKS])
@unittest.skip('demo compatibility test is only enabled on a needed-basis')
def test_demo_compatibility(self):
    # Delegates to DemoCompatibilityCheck; skipped by default and only
    # enabled manually when demo compatibility must be verified.
    self.compatibility_check()