mirror of
https://github.com/hzwer/ECCV2022-RIFE.git
synced 2026-02-24 04:19:41 +01:00
Merge branch 'main' into main
This commit is contained in:
133
#Colab_demo.ipynb#
Normal file
133
#Colab_demo.ipynb#
Normal file
@@ -0,0 +1,133 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"name": "Untitled0.ipynb",
|
||||
"provenance": [],
|
||||
"include_colab_link": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"accelerator": "GPU"
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "view-in-github",
|
||||
"colab_type": "text"
|
||||
},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/hzwer/arXiv2020-RIFE/blob/main/Colab_demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "FypCcZkNNt2p"
|
||||
},
|
||||
"source": [
|
||||
"!git clone https://github.com/hzwer/arXiv2020-RIFE"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "1wysVHxoN54f"
|
||||
},
|
||||
"source": [
|
||||
"!gdown --id 1zYc3PEN4t6GOUoVYJjvcXoMmM3kFDNGS\n",
|
||||
"!7z e RIFE_trained_model_new.zip"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "AhbHfRBJRAUt"
|
||||
},
|
||||
"source": [
|
||||
"!mkdir /content/arXiv2020-RIFE/train_log\n",
|
||||
"!mv *.pkl /content/arXiv2020-RIFE/train_log/\n",
|
||||
"%cd /content/arXiv2020-RIFE/\n",
|
||||
"!gdown --id 1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "rirngW5uRMdg"
|
||||
},
|
||||
"source": [
|
||||
"Please upload your video to /content/arXiv2020-RIFE/video.mp4, or use our demo video."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "dnLn4aHHPzN3"
|
||||
},
|
||||
"source": [
|
||||
"!nvidia-smi\n",
|
||||
"!python3 inference_video.py --exp=2 --video=demo.mp4 --montage --skip"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "77KK6lxHgJhf"
|
||||
},
|
||||
"source": [
|
||||
"Our demo.mp4 is 25FPS. You can adjust the parameters for your own preference.\n",
|
||||
"For example: \n",
|
||||
"--fps=60 --exp=1 --video=mydemo.avi --png"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "Qzp53hpF5ynz"
|
||||
},
|
||||
"source": [
|
||||
"!nvidia-smi\n",
|
||||
"!python3 inference_video_parallel.py --exp=1 --video=demo.mp4 --skip"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "KjqnCQ_-53I7"
|
||||
},
|
||||
"source": [
|
||||
"Try our 4-way parallel mode to process your video and expect a 100% speedup! (Does not support montage mode)\n",
|
||||
"\n",
|
||||
"Unfortunately, no difference can be seen on Google Colab servers."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"id": "0zIBbVE3UfUD"
|
||||
},
|
||||
"source": [
|
||||
"from IPython.display import display, Image\n",
|
||||
"import moviepy.editor as mpy\n",
|
||||
"display(mpy.ipython_display('demo_4X_100fps.mp4', height=256, max_duration=100.))"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -77,7 +77,7 @@
|
||||
},
|
||||
"source": [
|
||||
"!nvidia-smi\n",
|
||||
"!python3 inference_video.py --times=2 --video=demo.mp4 --montage --skip"
|
||||
"!python3 inference_video.py --exp=2 --video=demo.mp4 --montage --skip"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
@@ -90,7 +90,7 @@
|
||||
"source": [
|
||||
"Our demo.mp4 is 25FPS. You can adjust the parameters for your own preference.\n",
|
||||
"For example: \n",
|
||||
"--fps=60 --times=1 --video=mydemo.avi --png"
|
||||
"--fps=60 --exp=1 --video=mydemo.avi --png"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -100,7 +100,7 @@
|
||||
},
|
||||
"source": [
|
||||
"!nvidia-smi\n",
|
||||
"!python3 inference_video_parallel.py --times=1 --video=demo.mp4 --skip"
|
||||
"!python3 inference_video_parallel.py --exp=1 --video=demo.mp4 --skip"
|
||||
],
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
|
||||
12
README.md
12
README.md
@@ -38,15 +38,15 @@ The models under different setting is coming soon.
|
||||
|
||||
You can use our [demo video](https://drive.google.com/file/d/1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc/view?usp=sharing) or use your own video to process.
|
||||
```
|
||||
$ python3 inference_video.py --times=1 --video=video.mp4
|
||||
$ python3 inference_video.py --exp=1 --video=video.mp4
|
||||
```
|
||||
(generate video_2X_xxfps.mp4, you can use this script repeatedly to get 4X, 8X...)
|
||||
```
|
||||
$ python3 inference_video.py --times=2 --video=video.mp4
|
||||
$ python3 inference_video.py --exp=2 --video=video.mp4
|
||||
```
|
||||
(we specifically support times=2 for 4X interpolation)
|
||||
(we specifically support exp=2 for 4X interpolation)
|
||||
```
|
||||
$ python3 inference_video.py --times=2 --video=video.mp4 --fps=60
|
||||
$ python3 inference_video.py --exp=2 --video=video.mp4 --fps=60
|
||||
```
|
||||
(add slomo effect)
|
||||
```
|
||||
@@ -54,7 +54,7 @@ $ python3 inference_video.py --video=video.mp4 --montage --png
|
||||
```
|
||||
(if you want to montage the origin video, and save the png format output)
|
||||
```
|
||||
$ python3 inference_video_parallel.py --times=2 --video=video.mp4
|
||||
$ python3 inference_video_parallel.py --exp=2 --video=video.mp4
|
||||
```
|
||||
(Try our parallel process to get 100% speedup!)
|
||||
|
||||
@@ -63,7 +63,7 @@ The warning info, 'Warning: Your video has *** static frames, it may change the
|
||||
**Image Interpolation**
|
||||
|
||||
```
|
||||
$ python3 inference_img.py --img img0.png img1.png --times=4
|
||||
$ python3 inference_img.py --img img0.png img1.png --exp=4
|
||||
```
|
||||
(2^4=16X interpolation results)
|
||||
After that, you can use pngs to generate mp4:
|
||||
|
||||
@@ -9,7 +9,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
||||
|
||||
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
|
||||
parser.add_argument('--img', dest='img', nargs=2, required=True)
|
||||
parser.add_argument('--times', default=4, type=int)
|
||||
parser.add_argument('--exp', default=4, type=int)
|
||||
args = parser.parse_args()
|
||||
|
||||
model = Model()
|
||||
@@ -30,7 +30,7 @@ img0 = F.pad(img0, padding)
|
||||
img1 = F.pad(img1, padding)
|
||||
|
||||
img_list = [img0, img1]
|
||||
for i in range(args.times):
|
||||
for i in range(args.exp):
|
||||
tmp = []
|
||||
for j in range(len(img_list) - 1):
|
||||
mid = model.inference(img_list[j], img_list[j + 1])
|
||||
|
||||
@@ -19,10 +19,10 @@ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to
|
||||
parser.add_argument('--fps', dest='fps', type=int, default=None)
|
||||
parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs')
|
||||
parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')
|
||||
parser.add_argument('--times', dest='times', type=int, default=1)
|
||||
parser.add_argument('--exp', dest='exp', type=int, default=1)
|
||||
args = parser.parse_args()
|
||||
assert (args.times == 1 or args.times == 2)
|
||||
args.times = 2 ** args.times
|
||||
assert (args.exp == 1 or args.exp == 2)
|
||||
args.exp = 2 ** args.exp
|
||||
|
||||
from model.RIFE import Model
|
||||
model = Model()
|
||||
@@ -33,7 +33,7 @@ model.device()
|
||||
videoCapture = cv2.VideoCapture(args.video)
|
||||
fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS))
|
||||
if args.fps is None:
|
||||
args.fps = fps * args.times
|
||||
args.fps = fps * args.exp
|
||||
success, frame = videoCapture.read()
|
||||
h, w, _ = frame.shape
|
||||
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
|
||||
@@ -42,7 +42,7 @@ if args.png:
|
||||
os.mkdir('output')
|
||||
else:
|
||||
video_path_wo_ext, ext = os.path.splitext(args.video)
|
||||
output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.times, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h))
|
||||
output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exp, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h))
|
||||
|
||||
cnt = 0
|
||||
def writeframe(frame):
|
||||
@@ -88,25 +88,25 @@ while success:
|
||||
mid2 = frame
|
||||
else:
|
||||
mid1 = model.inference(I0, I1)
|
||||
if args.times == 4:
|
||||
if args.exp == 4:
|
||||
mid = model.inference(torch.cat((I0, mid1), 0), torch.cat((mid1, I1), 0))
|
||||
mid1 = (((mid1[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8')
|
||||
if args.times == 4:
|
||||
if args.exp == 4:
|
||||
mid0 = (((mid[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8')
|
||||
mid2 = (((mid[1]* 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8')
|
||||
if args.montage:
|
||||
writeframe(np.concatenate((lastframe, lastframe), 1))
|
||||
if args.times == 4:
|
||||
if args.exp == 4:
|
||||
writeframe(np.concatenate((lastframe, mid0[:h, :w]), 1))
|
||||
writeframe(np.concatenate((lastframe, mid1[:h, :w]), 1))
|
||||
if args.times == 4:
|
||||
if args.exp == 4:
|
||||
writeframe(np.concatenate((lastframe, mid2[:h, :w]), 1))
|
||||
else:
|
||||
writeframe(lastframe)
|
||||
if args.times == 4:
|
||||
if args.exp == 4:
|
||||
writeframe(mid0[:h, :w])
|
||||
writeframe(mid1[:h, :w])
|
||||
if args.times == 4:
|
||||
if args.exp == 4:
|
||||
writeframe(mid2[:h, :w])
|
||||
pbar.update(1)
|
||||
if args.montage:
|
||||
|
||||
@@ -118,4 +118,4 @@ while success:
|
||||
pbar.update(4)
|
||||
img_list = img_list[-1:]
|
||||
pbar.close()
|
||||
vid_out.release()
|
||||
vid_out.release()
|
||||
Reference in New Issue
Block a user