diff --git a/inference_video.py b/inference_video.py
index 609bb04..8f1d095 100644
--- a/inference_video.py
+++ b/inference_video.py
@@ -73,8 +73,8 @@ while success:
     if success:
         if args.montage:
             frame = frame[:, left: left + w]
-        I0 = torch.from_numpy(np.transpose(lastframe, (2,0,1)).astype('float32') / 255.).to(device, non_blocking=True).unsqueeze(0)
-        I1 = torch.from_numpy(np.transpose(frame, (2,0,1)).astype('float32') / 255.).to(device, non_blocking=True).unsqueeze(0)
+        I0 = torch.from_numpy(np.transpose(lastframe, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
+        I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
         I0 = F.pad(I0, padding)
         I1 = F.pad(I1, padding)
         p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
diff --git a/inference_video_parallel.py b/inference_video_parallel.py
index de904ab..eaf18a1 100644
--- a/inference_video_parallel.py
+++ b/inference_video_parallel.py
@@ -105,8 +105,9 @@ while success:
     if success:
         img_list.append(frame)
     if len(img_list) == 5 or (not success and len(img_list) > 1):
-        I0 = torch.from_numpy(np.transpose(img_list[:-1], (0, 3, 1, 2)).astype('float32') / 255.).to(device, non_blocking=True)
-        I1 = torch.from_numpy(np.transpose(img_list[1:], (0, 3, 1, 2)).astype('float32') / 255.).to(device, non_blocking=True)
+        imgs = torch.from_numpy(np.transpose(img_list, (0, 3, 1, 2))).to(device, non_blocking=True).float() / 255.
+        I0 = imgs[:-1]
+        I1 = imgs[1:]
         p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
              - F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs()
         I0 = F.pad(I0, padding)