Mirror of https://github.com/hzwer/ECCV2022-RIFE.git (synced 2025-12-16 08:27:45 +01:00)
Add RIFE_m
@@ -13,7 +13,7 @@ from skimage.color import rgb2yuv, yuv2rgb
 from yuv_frame_io import YUV_Read,YUV_Write
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-model = Model()
+model = Model(arbitrary=True)
 model.load_model('train_log')
 model.eval()
 model.device()
@@ -31,14 +31,21 @@ name_list = [
     ('HD_dataset/HD544p_GT/Sintel_Temple1_1280x544.yuv', 544, 1280),
     ('HD_dataset/HD544p_GT/Sintel_Temple2_1280x544.yuv', 544, 1280),
 ]
-def inference(I0, I1, pad, multi=2):
+def inference(I0, I1, pad, multi=2, arbitrary=True):
     img = [I0, I1]
-    for i in range(multi):
-        res = [I0]
-        for j in range(len(img) - 1):
-            res.append(model.inference(img[j], img[j + 1]))
-            res.append(img[j + 1])
-        img = res
+    if not arbitrary:
+        for i in range(multi):
+            res = [I0]
+            for j in range(len(img) - 1):
+                res.append(model.inference(img[j], img[j + 1]))
+                res.append(img[j + 1])
+            img = res
+    else:
+        img = [I0]
+        p = 2**multi
+        for i in range(p-1):
+            img.append(model.inference(I0, I1, timestep=(i+1)*(1./p)))
+        img.append(I1)
     for i in range(len(img)):
         img[i] = img[i][0][:, pad: -pad]
     return img[1: -1]
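Note (not part of the commit): with arbitrary=False the helper above keeps the original recursive scheme, where each of the multi passes interpolates a midpoint between every neighbouring pair and so doubles the frame count; with arbitrary=True it instead asks the arbitrary-timestep model directly for each intermediate position. A minimal plain-Python sketch of the timesteps this produces for multi=2:

    multi = 2
    p = 2 ** multi                                           # 2**multi frames per input pair, endpoints included
    timesteps = [(i + 1) * (1.0 / p) for i in range(p - 1)]
    print(timesteps)                                         # [0.25, 0.5, 0.75] -- the same positions the
                                                             # recursive branch reaches after two midpoint passes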
@@ -60,7 +60,7 @@ class IFNet(nn.Module):
         self.contextnet = Contextnet()
         self.unet = Unet()

-    def forward(self, x, scale=[4,2,1]):
+    def forward(self, x, scale=[4,2,1], timestep=0.5):
         img0 = x[:, :3]
         img1 = x[:, 3:6]
         gt = x[:, 6:] # In inference time, gt is None
model/IFNet_m.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from model.warplayer import warp
+from model.refine import *
+
+def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
+    return nn.Sequential(
+        torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes, kernel_size=4, stride=2, padding=1),
+        nn.PReLU(out_planes)
+    )
+
+def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
+    return nn.Sequential(
+        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
+                  padding=padding, dilation=dilation, bias=True),
+        nn.PReLU(out_planes)
+    )
+
+class IFBlock(nn.Module):
+    def __init__(self, in_planes, c=64):
+        super(IFBlock, self).__init__()
+        self.conv0 = nn.Sequential(
+            conv(in_planes, c//2, 3, 2, 1),
+            conv(c//2, c, 3, 2, 1),
+        )
+        self.convblock = nn.Sequential(
+            conv(c, c),
+            conv(c, c),
+            conv(c, c),
+            conv(c, c),
+            conv(c, c),
+            conv(c, c),
+            conv(c, c),
+            conv(c, c),
+        )
+        self.lastconv = nn.ConvTranspose2d(c, 5, 4, 2, 1)
+
+    def forward(self, x, flow, scale):
+        if scale != 1:
+            x = F.interpolate(x, scale_factor = 1. / scale, mode="bilinear", align_corners=False)
+        if flow != None:
+            flow = F.interpolate(flow, scale_factor = 1. / scale, mode="bilinear", align_corners=False) * 1. / scale
+            x = torch.cat((x, flow), 1)
+        x = self.conv0(x)
+        x = self.convblock(x) + x
+        tmp = self.lastconv(x)
+        tmp = F.interpolate(tmp, scale_factor = scale * 2, mode="bilinear", align_corners=False)
+        flow = tmp[:, :4] * scale * 2
+        mask = tmp[:, 4:5]
+        return flow, mask
+
+class IFNet_m(nn.Module):
+    def __init__(self):
+        super(IFNet_m, self).__init__()
+        self.block0 = IFBlock(6+1, c=240)
+        self.block1 = IFBlock(13+4+1, c=150)
+        self.block2 = IFBlock(13+4+1, c=90)
+        self.block_tea = IFBlock(16+4+1, c=90)
+        self.contextnet = Contextnet()
+        self.unet = Unet()
+
+    def forward(self, x, scale=[4,2,1], timestep=0.5):
+        timestep = (x[:, :1].clone() * 0 + 1) * timestep
+        img0 = x[:, :3]
+        img1 = x[:, 3:6]
+        gt = x[:, 6:] # In inference time, gt is None
+        flow_list = []
+        merged = []
+        mask_list = []
+        warped_img0 = img0
+        warped_img1 = img1
+        flow = None
+        loss_distill = 0
+        stu = [self.block0, self.block1, self.block2]
+        for i in range(3):
+            if flow != None:
+                flow_d, mask_d = stu[i](torch.cat((img0, img1, timestep, warped_img0, warped_img1, mask), 1), flow, scale=scale[i])
+                flow = flow + flow_d
+                mask = mask + mask_d
+            else:
+                flow, mask = stu[i](torch.cat((img0, img1, timestep), 1), None, scale=scale[i])
+            mask_list.append(torch.sigmoid(mask))
+            flow_list.append(flow)
+            warped_img0 = warp(img0, flow[:, :2])
+            warped_img1 = warp(img1, flow[:, 2:4])
+            merged_student = (warped_img0, warped_img1)
+            merged.append(merged_student)
+        if gt.shape[1] == 3:
+            flow_d, mask_d = self.block_tea(torch.cat((img0, img1, timestep, warped_img0, warped_img1, mask, gt), 1), flow, scale=1)
+            flow_teacher = flow + flow_d
+            warped_img0_teacher = warp(img0, flow_teacher[:, :2])
+            warped_img1_teacher = warp(img1, flow_teacher[:, 2:4])
+            mask_teacher = torch.sigmoid(mask + mask_d)
+            merged_teacher = warped_img0_teacher * mask_teacher + warped_img1_teacher * (1 - mask_teacher)
+        else:
+            flow_teacher = None
+            merged_teacher = None
+        for i in range(3):
+            merged[i] = merged[i][0] * mask_list[i] + merged[i][1] * (1 - mask_list[i])
+            if gt.shape[1] == 3:
+                loss_mask = ((merged[i] - gt).abs().mean(1, True) > (merged_teacher - gt).abs().mean(1, True) + 0.01).float().detach()
+                loss_distill += ((flow_teacher.detach() - flow_list[i]).abs() * loss_mask).mean()
+        c0 = self.contextnet(img0, flow[:, :2])
+        c1 = self.contextnet(img1, flow[:, 2:4])
+        tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1)
+        res = tmp[:, :3] * 2 - 1
+        merged[2] = torch.clamp(merged[2] + res, 0, 1)
+        return flow_list, mask_list[2], merged, flow_teacher, merged_teacher, loss_distill
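Note (not part of the commit): the only structural difference from IFNet is the extra timestep channel. The line timestep = (x[:, :1].clone() * 0 + 1) * timestep broadcasts the scalar into a constant plane with the batch and spatial size of the inputs, which is then concatenated with img0 and img1; that is where the 6+1 input channels of block0 (and the extra +1 in the later blocks) come from. A minimal sketch with a toy tensor shape (the shapes are assumptions for illustration, not taken from the repo):

    import torch

    x = torch.rand(2, 6, 32, 32)                       # toy batch: two RGB frame pairs, 32x32
    timestep = 0.25
    t_plane = (x[:, :1].clone() * 0 + 1) * timestep    # shape (2, 1, 32, 32), every value 0.25
    inp = torch.cat((x[:, :3], x[:, 3:6], t_plane), 1)
    print(inp.shape)                                   # torch.Size([2, 7, 32, 32]) -> the 6+1 channels fed to block0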
@@ -7,6 +7,7 @@ import itertools
 from model.warplayer import warp
 from torch.nn.parallel import DistributedDataParallel as DDP
 from model.IFNet import *
+from model.IFNet_m import *
 import torch.nn.functional as F
 from model.loss import *
 from model.laplacian import *
@@ -15,8 +16,11 @@ from model.refine import *
 device = torch.device("cuda")

 class Model:
-    def __init__(self, local_rank=-1):
-        self.flownet = IFNet()
+    def __init__(self, local_rank=-1, arbitrary=False):
+        if arbitrary == True:
+            self.flownet = IFNet_m()
+        else:
+            self.flownet = IFNet()
         self.device()
         self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-3) # use large weight decay may avoid NaN loss
         self.epe = EPE()
@@ -49,13 +53,13 @@ class Model:
         if rank == 0:
             torch.save(self.flownet.state_dict(),'{}/flownet.pkl'.format(path))

-    def inference(self, img0, img1, scale_list=[4, 2, 1], TTA=False):
+    def inference(self, img0, img1, scale_list=[4, 2, 1], TTA=False, timestep=0.5):
         imgs = torch.cat((img0, img1), 1)
-        flow, mask, merged, flow_teacher, merged_teacher, loss_distill = self.flownet(imgs, scale_list)
+        flow, mask, merged, flow_teacher, merged_teacher, loss_distill = self.flownet(imgs, scale_list, timestep=timestep)
         if TTA == False:
             return merged[2]
         else:
-            flow2, mask2, merged2, flow_teacher2, merged_teacher2, loss_distill2 = self.flownet(imgs.flip(2).flip(3), scale_list)
+            flow2, mask2, merged2, flow_teacher2, merged_teacher2, loss_distill2 = self.flownet(imgs.flip(2).flip(3), scale_list, timestep=timestep)
             return (merged[2] + merged2[2].flip(2).flip(3)) / 2

     def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
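Note (not part of the commit): a minimal usage sketch of the new code path, assuming img0 and img1 are already normalized, padded NCHW tensors on the CUDA device (prepared the same way as in the benchmark scripts) and that 'train_log' holds matching weights:

    model = Model(arbitrary=True)                            # selects IFNet_m instead of IFNet
    model.load_model('train_log')
    model.eval()
    model.device()
    mid = model.inference(img0, img1, timestep=0.25)         # frame 25% of the way from img0 to img1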