From dfd713790b18a55d55e249be1fd7032a63a92357 Mon Sep 17 00:00:00 2001 From: hzwer <598460606@163.com> Date: Thu, 19 Nov 2020 11:05:05 +0800 Subject: [PATCH 1/3] Change 'times' to 'exponent' --- #Colab_demo.ipynb# | 133 ++++++++++++++++++++++++++++++++++++ Colab_demo.ipynb | 6 +- README.md | 12 ++-- inference_img.py | 4 +- inference_video.py | 22 +++--- inference_video_parallel.py | 26 +++---- 6 files changed, 168 insertions(+), 35 deletions(-) create mode 100644 #Colab_demo.ipynb# diff --git a/#Colab_demo.ipynb# b/#Colab_demo.ipynb# new file mode 100644 index 0000000..4409b6e --- /dev/null +++ b/#Colab_demo.ipynb# @@ -0,0 +1,133 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Untitled0.ipynb", + "provenance": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "FypCcZkNNt2p" + }, + "source": [ + "!git clone https://github.com/hzwer/arXiv2020-RIFE" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "1wysVHxoN54f" + }, + "source": [ + "!gdown --id 1zYc3PEN4t6GOUoVYJjvcXoMmM3kFDNGS\n", + "!7z e RIFE_trained_model_new.zip" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "AhbHfRBJRAUt" + }, + "source": [ + "!mkdir /content/arXiv2020-RIFE/train_log\n", + "!mv *.pkl /content/arXiv2020-RIFE/train_log/\n", + "%cd /content/arXiv2020-RIFE/\n", + "!gdown --id 1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rirngW5uRMdg" + }, + "source": [ + "Please upload your video to content/arXiv2020-RIFE/video.mp4, or use our demo video." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dnLn4aHHPzN3" + }, + "source": [ + "!nvidia-smi\n", + "!python3 inference_video.py --exponent=2 --video=demo.mp4 --montage --skip" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "77KK6lxHgJhf" + }, + "source": [ + "Our demo.mp4 is 25FPS. You can adjust the parameters for your own preference.\n", + "For example: \n", + "--fps=60 --exponent=1 --video=mydemo.avi --png" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Qzp53hpF5ynz" + }, + "source": [ + "!nvidia-smi\n", + "!python3 inference_video_parallel.py --exponent=1 --video=demo.mp4 --skip" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KjqnCQ_-53I7" + }, + "source": [ + "Try our 4 road parallel to process your video, expect 100% speedup! (Do not support montage mode)\n", + "\n", + "Unfortunately, no difference can be seen on Google colab servers." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "0zIBbVE3UfUD" + }, + "source": [ + "from IPython.display import display, Image\n", + "import moviepy.editor as mpy\n", + "display(mpy.ipython_display('demo_4X_100fps.mp4', height=256, max_duration=100.))" + ], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/Colab_demo.ipynb b/Colab_demo.ipynb index 5ef7dfc..7ade6c8 100644 --- a/Colab_demo.ipynb +++ b/Colab_demo.ipynb @@ -77,7 +77,7 @@ }, "source": [ "!nvidia-smi\n", - "!python3 inference_video.py --times=2 --video=demo.mp4 --montage --skip" + "!python3 inference_video.py --exponent=2 --video=demo.mp4 --montage --skip" ], "execution_count": null, "outputs": [] @@ -90,7 +90,7 @@ "source": [ "Our demo.mp4 is 25FPS. 
You can adjust the parameters for your own perference.\n", "For example: \n", - "--fps=60 --times=1 --video=mydemo.avi --png" + "--fps=60 --exponent=1 --video=mydemo.avi --png" ] }, { @@ -100,7 +100,7 @@ }, "source": [ "!nvidia-smi\n", - "!python3 inference_video_parallel.py --times=1 --video=demo.mp4 --skip" + "!python3 inference_video_parallel.py --exponent=1 --video=demo.mp4 --skip" ], "execution_count": null, "outputs": [] diff --git a/README.md b/README.md index 8c8028e..7ea3ed4 100644 --- a/README.md +++ b/README.md @@ -38,15 +38,15 @@ The models under different setting is coming soon. You can use our [demo video](https://drive.google.com/file/d/1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc/view?usp=sharing) or use your own video to process. ``` -$ python3 inference_video.py --times=1 --video=video.mp4 +$ python3 inference_video.py --exponent=1 --video=video.mp4 ``` (generate video_2X_xxfps.mp4, you can use this script repeatly to get 4X, 8X...) ``` -$ python3 inference_video.py --times=2 --video=video.mp4 +$ python3 inference_video.py --exponent=2 --video=video.mp4 ``` -(we specificly support times=2 for 4X interpolation) +(we specificly support exponent=2 for 4X interpolation) ``` -$ python3 inference_video.py --times=2 --video=video.mp4 --fps=60 +$ python3 inference_video.py --exponent=2 --video=video.mp4 --fps=60 ``` (add slomo effect) ``` @@ -54,7 +54,7 @@ $ python3 inference_video.py --video=video.mp4 --montage --png ``` (if you want to montage the origin video, and save the png format output) ``` -$ python3 inference_video_parallel.py --times=2 --video=video.mp4 +$ python3 inference_video_parallel.py --exponent=2 --video=video.mp4 ``` (Try our parallel process to get 100% speedup!) 
@@ -63,7 +63,7 @@ The warning info, 'Warning: Your video has *** static frames, it may change the **Image Interpolation** ``` -$ python3 inference_img.py --img img0.png img1.png --times=4 +$ python3 inference_img.py --img img0.png img1.png --exponent=4 ``` (2^4=16X interpolation results) After that, you can use pngs to generate mp4: diff --git a/inference_img.py b/inference_img.py index d9b78cb..e68548a 100644 --- a/inference_img.py +++ b/inference_img.py @@ -9,7 +9,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu") parser = argparse.ArgumentParser(description='Interpolation for a pair of images') parser.add_argument('--img', dest='img', nargs=2, required=True) -parser.add_argument('--times', default=4, type=int) +parser.add_argument('--exponent', default=4, type=int) args = parser.parse_args() model = Model() @@ -30,7 +30,7 @@ img0 = F.pad(img0, padding) img1 = F.pad(img1, padding) img_list = [img0, img1] -for i in range(args.times): +for i in range(args.exponent): tmp = [] for j in range(len(img_list) - 1): mid = model.inference(img_list[j], img_list[j + 1]) diff --git a/inference_video.py b/inference_video.py index 9d33ba6..47c84d6 100644 --- a/inference_video.py +++ b/inference_video.py @@ -19,10 +19,10 @@ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to parser.add_argument('--fps', dest='fps', type=int, default=None) parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs') parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension') -parser.add_argument('--times', dest='times', type=int, default=1) +parser.add_argument('--exponent', dest='exponent', type=int, default=1) args = parser.parse_args() -assert (args.times == 1 or args.times == 2) -args.times = 2 ** args.times +assert (args.exponent == 1 or args.exponent == 2) +args.exponent = 2 ** args.exponent from model.RIFE import Model model = Model() @@ -33,7 +33,7 @@ 
model.device() videoCapture = cv2.VideoCapture(args.video) fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS)) if args.fps is None: - args.fps = fps * args.times + args.fps = fps * args.exponent success, frame = videoCapture.read() h, w, _ = frame.shape fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') @@ -42,7 +42,7 @@ if args.png: os.mkdir('output') else: video_path_wo_ext, ext = os.path.splitext(args.video) - output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.times, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) + output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exponent, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) cnt = 0 def writeframe(frame): @@ -88,25 +88,25 @@ while success: mid2 = frame else: mid1 = model.inference(I0, I1) - if args.times == 4: + if args.exponent == 4: mid = model.inference(torch.cat((I0, mid1), 0), torch.cat((mid1, I1), 0)) mid1 = (((mid1[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8') - if args.times == 4: + if args.exponent == 4: mid0 = (((mid[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8') mid2 = (((mid[1]* 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8') if args.montage: writeframe(np.concatenate((lastframe, lastframe), 1)) - if args.times == 4: + if args.exponent == 4: writeframe(np.concatenate((lastframe, mid0[:h, :w]), 1)) writeframe(np.concatenate((lastframe, mid1[:h, :w]), 1)) - if args.times == 4: + if args.exponent == 4: writeframe(np.concatenate((lastframe, mid2[:h, :w]), 1)) else: writeframe(lastframe) - if args.times == 4: + if args.exponent == 4: writeframe(mid0[:h, :w]) writeframe(mid1[:h, :w]) - if args.times == 4: + if args.exponent == 4: writeframe(mid2[:h, :w]) pbar.update(1) if args.montage: diff --git a/inference_video_parallel.py b/inference_video_parallel.py index 6491c08..35eba8c 100644 --- a/inference_video_parallel.py +++ b/inference_video_parallel.py @@ -18,10 +18,10 
@@ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to parser.add_argument('--fps', dest='fps', type=int, default=None) parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs') parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension') -parser.add_argument('--times', dest='times', type=int, default=1) +parser.add_argument('--exponent', dest='exponent', type=int, default=1) args = parser.parse_args() -assert (args.times == 1 or args.times == 2) -args.times = 2 ** args.times +assert (args.exponent == 1 or args.exponent == 2) +args.exponent = 2 ** args.exponent from model.RIFE import Model model = Model() @@ -34,14 +34,14 @@ fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS)) success, frame = videoCapture.read() h, w, _ = frame.shape if args.fps is None: - args.fps = fps * args.times + args.fps = fps * args.exponent fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') if args.png: if not os.path.exists('output'): os.mkdir('output') else: video_path_wo_ext, ext = os.path.splitext(args.video) - output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.times, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) + output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exponent, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) cnt = 0 skip_frame = 1 @@ -49,10 +49,10 @@ def writeframe(I0, mid0, mid1, mid2, I1, p): global cnt, skip_frame, args for i in range(I0.shape[0]): if p[i] > 0.2: - if args.times == 4: + if args.exponent == 4: mid0[i] = I0[i] mid1[i] = I0[i] - if args.times == 4: + if args.exponent == 4: mid2[i] = I1[i] if p[i] < 2e-3 and args.skip: if skip_frame % 100 == 0: @@ -62,20 +62,20 @@ def writeframe(I0, mid0, mid1, mid2, I1, p): if args.png: cv2.imwrite('output/{:0>7d}.png'.format(cnt), I0[i]) cnt += 1 - if args.times == 4: + if args.exponent == 4: 
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid0[i]) cnt += 1 cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid1[i]) cnt += 1 - if args.times == 4: + if args.exponent == 4: cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid2[i]) cnt += 1 else: output.write(I0[i]) - if args.times == 4: + if args.exponent == 4: output.write(mid0[i]) output.write(mid1[i]) - if args.times == 4: + if args.exponent == 4: output.write(mid2[i]) @@ -98,13 +98,13 @@ while success: I0 = F.pad(I0, padding) I1 = F.pad(I1, padding) mid1 = model.inference(I0, I1) - if args.times == 4: + if args.exponent == 4: mid0 = model.inference(I0, mid1) mid2 = model.inference(mid1, I1) I0 = ((I0[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') I1 = ((I1[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') mid1 = ((mid1[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') - if args.times == 4: + if args.exponent == 4: mid0 = ((mid0[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') mid2 = ((mid2[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') else: From 85a17da8bb851e436a50b4289082b616f8ffd3f8 Mon Sep 17 00:00:00 2001 From: hzwer <598460606@163.com> Date: Thu, 19 Nov 2020 11:07:37 +0800 Subject: [PATCH 2/3] Fix typo --- Colab_demo.ipynb | 6 +++--- README.md | 12 ++++++------ inference_img.py | 4 ++-- inference_video.py | 22 +++++++++++----------- inference_video_parallel.py | 26 +++++++++++++------------- 5 files changed, 35 insertions(+), 35 deletions(-) diff --git a/Colab_demo.ipynb b/Colab_demo.ipynb index 7ade6c8..09604e4 100644 --- a/Colab_demo.ipynb +++ b/Colab_demo.ipynb @@ -77,7 +77,7 @@ }, "source": [ "!nvidia-smi\n", - "!python3 inference_video.py --exponent=2 --video=demo.mp4 --montage --skip" + "!python3 inference_video.py --exp=2 --video=demo.mp4 --montage --skip" ], "execution_count": null, "outputs": [] @@ -90,7 +90,7 @@ "source": 
[ "Our demo.mp4 is 25FPS. You can adjust the parameters for your own perference.\n", "For example: \n", - "--fps=60 --exponent=1 --video=mydemo.avi --png" + "--fps=60 --exp=1 --video=mydemo.avi --png" ] }, { @@ -100,7 +100,7 @@ }, "source": [ "!nvidia-smi\n", - "!python3 inference_video_parallel.py --exponent=1 --video=demo.mp4 --skip" + "!python3 inference_video_parallel.py --exp=1 --video=demo.mp4 --skip" ], "execution_count": null, "outputs": [] diff --git a/README.md b/README.md index 7ea3ed4..094f09b 100644 --- a/README.md +++ b/README.md @@ -38,15 +38,15 @@ The models under different setting is coming soon. You can use our [demo video](https://drive.google.com/file/d/1i3xlKb7ax7Y70khcTcuePi6E7crO_dFc/view?usp=sharing) or use your own video to process. ``` -$ python3 inference_video.py --exponent=1 --video=video.mp4 +$ python3 inference_video.py --exp=1 --video=video.mp4 ``` (generate video_2X_xxfps.mp4, you can use this script repeatly to get 4X, 8X...) ``` -$ python3 inference_video.py --exponent=2 --video=video.mp4 +$ python3 inference_video.py --exp=2 --video=video.mp4 ``` -(we specificly support exponent=2 for 4X interpolation) +(we specificly support exp=2 for 4X interpolation) ``` -$ python3 inference_video.py --exponent=2 --video=video.mp4 --fps=60 +$ python3 inference_video.py --exp=2 --video=video.mp4 --fps=60 ``` (add slomo effect) ``` @@ -54,7 +54,7 @@ $ python3 inference_video.py --video=video.mp4 --montage --png ``` (if you want to montage the origin video, and save the png format output) ``` -$ python3 inference_video_parallel.py --exponent=2 --video=video.mp4 +$ python3 inference_video_parallel.py --exp=2 --video=video.mp4 ``` (Try our parallel process to get 100% speedup!) 
@@ -63,7 +63,7 @@ The warning info, 'Warning: Your video has *** static frames, it may change the **Image Interpolation** ``` -$ python3 inference_img.py --img img0.png img1.png --exponent=4 +$ python3 inference_img.py --img img0.png img1.png --exp=4 ``` (2^4=16X interpolation results) After that, you can use pngs to generate mp4: diff --git a/inference_img.py b/inference_img.py index e68548a..d90128c 100644 --- a/inference_img.py +++ b/inference_img.py @@ -9,7 +9,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu") parser = argparse.ArgumentParser(description='Interpolation for a pair of images') parser.add_argument('--img', dest='img', nargs=2, required=True) -parser.add_argument('--exponent', default=4, type=int) +parser.add_argument('--exp', default=4, type=int) args = parser.parse_args() model = Model() @@ -30,7 +30,7 @@ img0 = F.pad(img0, padding) img1 = F.pad(img1, padding) img_list = [img0, img1] -for i in range(args.exponent): +for i in range(args.exp): tmp = [] for j in range(len(img_list) - 1): mid = model.inference(img_list[j], img_list[j + 1]) diff --git a/inference_video.py b/inference_video.py index 47c84d6..d67e605 100644 --- a/inference_video.py +++ b/inference_video.py @@ -19,10 +19,10 @@ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to parser.add_argument('--fps', dest='fps', type=int, default=None) parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs') parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension') -parser.add_argument('--exponent', dest='exponent', type=int, default=1) +parser.add_argument('--exp', dest='exp', type=int, default=1) args = parser.parse_args() -assert (args.exponent == 1 or args.exponent == 2) -args.exponent = 2 ** args.exponent +assert (args.exp == 1 or args.exp == 2) +args.exp = 2 ** args.exp from model.RIFE import Model model = Model() @@ -33,7 +33,7 @@ model.device() 
videoCapture = cv2.VideoCapture(args.video) fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS)) if args.fps is None: - args.fps = fps * args.exponent + args.fps = fps * args.exp success, frame = videoCapture.read() h, w, _ = frame.shape fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') @@ -42,7 +42,7 @@ if args.png: os.mkdir('output') else: video_path_wo_ext, ext = os.path.splitext(args.video) - output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exponent, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) + output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exp, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) cnt = 0 def writeframe(frame): @@ -88,25 +88,25 @@ while success: mid2 = frame else: mid1 = model.inference(I0, I1) - if args.exponent == 4: + if args.exp == 4: mid = model.inference(torch.cat((I0, mid1), 0), torch.cat((mid1, I1), 0)) mid1 = (((mid1[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8') - if args.exponent == 4: + if args.exp == 4: mid0 = (((mid[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8') mid2 = (((mid[1]* 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8') if args.montage: writeframe(np.concatenate((lastframe, lastframe), 1)) - if args.exponent == 4: + if args.exp == 4: writeframe(np.concatenate((lastframe, mid0[:h, :w]), 1)) writeframe(np.concatenate((lastframe, mid1[:h, :w]), 1)) - if args.exponent == 4: + if args.exp == 4: writeframe(np.concatenate((lastframe, mid2[:h, :w]), 1)) else: writeframe(lastframe) - if args.exponent == 4: + if args.exp == 4: writeframe(mid0[:h, :w]) writeframe(mid1[:h, :w]) - if args.exponent == 4: + if args.exp == 4: writeframe(mid2[:h, :w]) pbar.update(1) if args.montage: diff --git a/inference_video_parallel.py b/inference_video_parallel.py index 35eba8c..4503697 100644 --- a/inference_video_parallel.py +++ b/inference_video_parallel.py @@ -18,10 +18,10 @@ parser.add_argument('--skip', 
dest='skip', action='store_true', help='whether to parser.add_argument('--fps', dest='fps', type=int, default=None) parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs') parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension') -parser.add_argument('--exponent', dest='exponent', type=int, default=1) +parser.add_argument('--exp', dest='exp', type=int, default=1) args = parser.parse_args() -assert (args.exponent == 1 or args.exponent == 2) -args.exponent = 2 ** args.exponent +assert (args.exp == 1 or args.exp == 2) +args.exp = 2 ** args.exp from model.RIFE import Model model = Model() @@ -34,14 +34,14 @@ fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS)) success, frame = videoCapture.read() h, w, _ = frame.shape if args.fps is None: - args.fps = fps * args.exponent + args.fps = fps * args.exp fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') if args.png: if not os.path.exists('output'): os.mkdir('output') else: video_path_wo_ext, ext = os.path.splitext(args.video) - output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exponent, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) + output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exp, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) cnt = 0 skip_frame = 1 @@ -49,10 +49,10 @@ def writeframe(I0, mid0, mid1, mid2, I1, p): global cnt, skip_frame, args for i in range(I0.shape[0]): if p[i] > 0.2: - if args.exponent == 4: + if args.exp == 4: mid0[i] = I0[i] mid1[i] = I0[i] - if args.exponent == 4: + if args.exp == 4: mid2[i] = I1[i] if p[i] < 2e-3 and args.skip: if skip_frame % 100 == 0: @@ -62,20 +62,20 @@ def writeframe(I0, mid0, mid1, mid2, I1, p): if args.png: cv2.imwrite('output/{:0>7d}.png'.format(cnt), I0[i]) cnt += 1 - if args.exponent == 4: + if args.exp == 4: cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid0[i]) cnt += 1 
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid1[i]) cnt += 1 - if args.exponent == 4: + if args.exp == 4: cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid2[i]) cnt += 1 else: output.write(I0[i]) - if args.exponent == 4: + if args.exp == 4: output.write(mid0[i]) output.write(mid1[i]) - if args.exponent == 4: + if args.exp == 4: output.write(mid2[i]) @@ -98,13 +98,13 @@ while success: I0 = F.pad(I0, padding) I1 = F.pad(I1, padding) mid1 = model.inference(I0, I1) - if args.exponent == 4: + if args.exp == 4: mid0 = model.inference(I0, mid1) mid2 = model.inference(mid1, I1) I0 = ((I0[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') I1 = ((I1[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') mid1 = ((mid1[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') - if args.exponent == 4: + if args.exp == 4: mid0 = ((mid0[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') mid2 = ((mid2[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') else: From fe66460513a19194d2eba1b4ac5a538d6b5ca5e1 Mon Sep 17 00:00:00 2001 From: hzwer <598460606@163.com> Date: Thu, 19 Nov 2020 11:12:29 +0800 Subject: [PATCH 3/3] For merge request --- inference_video_parallel.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/inference_video_parallel.py b/inference_video_parallel.py index 4503697..1c92797 100644 --- a/inference_video_parallel.py +++ b/inference_video_parallel.py @@ -18,10 +18,10 @@ parser.add_argument('--skip', dest='skip', action='store_true', help='whether to parser.add_argument('--fps', dest='fps', type=int, default=None) parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs') parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension') -parser.add_argument('--exp', dest='exp', 
type=int, default=1) +parser.add_argument('--times', dest='times', type=int, default=1) args = parser.parse_args() -assert (args.exp == 1 or args.exp == 2) -args.exp = 2 ** args.exp +assert (args.times == 1 or args.times == 2) +args.times = 2 ** args.times from model.RIFE import Model model = Model() @@ -34,14 +34,14 @@ fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS)) success, frame = videoCapture.read() h, w, _ = frame.shape if args.fps is None: - args.fps = fps * args.exp + args.fps = fps * args.times fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') if args.png: if not os.path.exists('output'): os.mkdir('output') else: video_path_wo_ext, ext = os.path.splitext(args.video) - output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.exp, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) + output = cv2.VideoWriter('{}_{}X_{}fps.{}'.format(video_path_wo_ext, args.times, int(np.round(args.fps)), args.ext), fourcc, args.fps, (w, h)) cnt = 0 skip_frame = 1 @@ -49,10 +49,10 @@ def writeframe(I0, mid0, mid1, mid2, I1, p): global cnt, skip_frame, args for i in range(I0.shape[0]): if p[i] > 0.2: - if args.exp == 4: + if args.times == 4: mid0[i] = I0[i] mid1[i] = I0[i] - if args.exp == 4: + if args.times == 4: mid2[i] = I1[i] if p[i] < 2e-3 and args.skip: if skip_frame % 100 == 0: @@ -60,22 +60,22 @@ def writeframe(I0, mid0, mid1, mid2, I1, p): skip_frame += 1 continue if args.png: - cv2.imwrite('output/{:0>7d}.png'.format(cnt), I0[i]) + cv2.imwrite('output/{:0>7d}.png'.format(cnt), I0[i]) cnt += 1 - if args.exp == 4: + if args.times == 4: cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid0[i]) cnt += 1 cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid1[i]) cnt += 1 - if args.exp == 4: + if args.times == 4: cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid2[i]) cnt += 1 else: output.write(I0[i]) - if args.exp == 4: + if args.times == 4: output.write(mid0[i]) output.write(mid1[i]) - if args.exp == 4: + if args.times == 4: output.write(mid2[i]) 
@@ -98,13 +98,13 @@ while success: I0 = F.pad(I0, padding) I1 = F.pad(I1, padding) mid1 = model.inference(I0, I1) - if args.exp == 4: + if args.times == 4: mid0 = model.inference(I0, mid1) mid2 = model.inference(mid1, I1) I0 = ((I0[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') I1 = ((I1[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') mid1 = ((mid1[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') - if args.exp == 4: + if args.times == 4: mid0 = ((mid0[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') mid2 = ((mid2[:, :, :h, :w] * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1)).astype('uint8') else: @@ -113,4 +113,3 @@ while success: pbar.update(4) img_list = img_list[-1:] pbar.close() -output.release()