Make state values controllable, delete redundant functions, detach from some of shanggao's bad code, and improve first-frame selection -- li

Author: memoryunreal
Date: 2023-04-17 21:13:45 +00:00
parent 98c93ace38
commit e63459fdb6

app.py (261 lines changed)

@@ -34,16 +34,6 @@ def download_checkpoint(url, folder, filename):
     return filepath
 
-def pause_video(play_state):
-    print("user pause_video")
-    play_state.append(time.time())
-    return play_state
-
-def play_video(play_state):
-    print("user play_video")
-    play_state.append(time.time())
-    return play_state
-
 # convert points input to prompt state
 def get_prompt(click_state, click_input):
     inputs = json.loads(click_input)
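
For orientation: get_prompt (its body is elided between these hunks) accumulates clicks into the dict that sam_refine later reads. A minimal sketch of that contract, assuming the key names visible elsewhere in this diff (input_point, input_label, multimask_output); the prompt_type field and the accumulation logic are guesses, not the repo's exact code:

    import json

    # sketch: fold the serialized click list into the running click_state and
    # return the prompt dict consumed by model.first_frame_click
    def build_prompt_sketch(click_state, click_input):
        points, labels = click_state
        for x, y, label in json.loads(click_input):  # label: 1 positive, 0 negative
            points.append([x, y])
            labels.append(label)
        return {
            "prompt_type": ["click"],   # assumed field name
            "input_point": points,
            "input_label": labels,
            "multimask_output": "True",
        }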
@@ -62,7 +52,7 @@ def get_prompt(click_state, click_input):
     }
     return prompt
 
-def get_frames_from_video(video_input, play_state):
+def get_frames_from_video(video_input, video_state):
     """
     Args:
         video_path:str
@@ -71,10 +61,6 @@ def get_frames_from_video(video_input, play_state):
         [[0:nearest_frame], [nearest_frame:], nearest_frame]
     """
     video_path = video_input
-    try:
-        timestamp = play_state[1] - play_state[0]
-    except:
-        timestamp = 0.1
     frames = []
     try:
         cap = cv2.VideoCapture(video_path)
@@ -88,18 +74,31 @@
     except (OSError, TypeError, ValueError, KeyError, SyntaxError) as e:
         print("read_frame_source:{} error. {}\n".format(video_path, str(e)))
 
     # for index, frame in enumerate(frames):
     #     frames[index] = np.asarray(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
-    key_frame_index = int(timestamp * fps)
-    nearest_frame = frames[key_frame_index]
-    frames_split = [frames[:key_frame_index], frames[key_frame_index:], nearest_frame]
     # output_path='./seperate.mp4'
     # torchvision.io.write_video(output_path, frames[1], fps=fps, video_codec="libx264")
+    # initialize video_state
+    video_state = {
+        "video_name": os.path.split(video_path)[-1],
+        "origin_images": frames,
+        "painted_images": frames.copy(),
+        "masks": [None]*len(frames),
+        "logits": [None]*len(frames),
+        "select_frame_number": 0,
+        "fps": 30
+    }
+    return video_state, gr.update(visible=True, maximum=len(frames), value=1)
-    # set image in sam when select the template frame
-    model.samcontroler.sam_controler.set_image(nearest_frame)
-    return frames_split, nearest_frame, nearest_frame, fps
 
+def select_template(image_selection_slider, video_state):
+    # images = video_state[1]
+    image_selection_slider -= 1
+    video_state["select_frame_number"] = image_selection_slider
+    # once select a new template frame, set the image in sam
+    model.samcontroler.sam_controler.reset_image()
+    model.samcontroler.sam_controler.set_image(video_state["origin_images"][image_selection_slider])
+    return video_state["painted_images"][image_selection_slider], video_state
 
 def generate_video_from_frames(frames, output_path, fps=30):
     """
@@ -110,24 +109,13 @@ def generate_video_from_frames(frames, output_path, fps=30):
         output_path (str): The path to save the generated video.
         fps (int, optional): The frame rate of the output video. Defaults to 30.
     """
-    # height, width, layers = frames[0].shape
-    # fourcc = cv2.VideoWriter_fourcc(*"mp4v")
-    # video = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
-    # for frame in frames:
-    #     video.write(frame)
-    # video.release()
     frames = torch.from_numpy(np.asarray(frames))
-    output_path='./output.mp4'
     if not os.path.exists(os.path.dirname(output_path)):
         os.makedirs(os.path.dirname(output_path))
     torchvision.io.write_video(output_path, frames, fps=fps, video_codec="libx264")
     return output_path
 
-def model_reset():
-    model.xmem.clear_memory()
-    return None
-
-def sam_refine(origin_frame, point_prompt, click_state, logit, evt:gr.SelectData):
+def sam_refine(video_state, point_prompt, click_state, evt:gr.SelectData):
     """
     Args:
         template_frame: PIL.Image
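
generate_video_from_frames now honors its output_path argument instead of overwriting it with a hardcoded file. For reference, torchvision.io.write_video expects a uint8 tensor shaped (T, H, W, C); a sketch with a dummy clip:

    import numpy as np
    import torch
    import torchvision

    # dummy 10-frame, 64x64 RGB clip; real frames come from the tracker
    clip = torch.from_numpy(np.zeros((10, 64, 64, 3), dtype=np.uint8))
    torchvision.io.write_video("./output.mp4", clip, fps=30, video_codec="libx264")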
@@ -142,35 +130,17 @@ def sam_refine(video_state, point_prompt, click_state, evt:gr.SelectData):
     # prompt for sam model
     prompt = get_prompt(click_state=click_state, click_input=coordinate)
 
-    # default value
-    # points = np.array([[evt.index[0],evt.index[1]]])
-    # labels= np.array([1])
-    if len(logit)==0:
-        logit = None
-
     mask, logit, painted_image = model.first_frame_click(
-        image=origin_frame,
+        image=video_state["origin_images"][video_state["select_frame_number"]],
         points=np.array(prompt["input_point"]),
         labels=np.array(prompt["input_label"]),
         multimask=prompt["multimask_output"],
     )
-    return painted_image, click_state, logit, mask
+    video_state["masks"][video_state["select_frame_number"]] = mask
+    video_state["logits"][video_state["select_frame_number"]] = logit
+    video_state["painted_images"][video_state["select_frame_number"]] = painted_image
 
-def vos_tracking_video(video_state, template_mask,fps):
-    masks, logits, painted_images = model.generator(images=video_state[1], template_mask=template_mask)
-    video_output = generate_video_from_frames(painted_images, output_path="./output.mp4", fps=fps)
-    # image_selection_slider = gr.Slider(minimum=1, maximum=len(video_state[1]), value=1, label="Image Selection", interactive=True)
-    return video_output, painted_images, masks, logits
-
-def vos_tracking_image(image_selection_slider, painted_images):
-    # images = video_state[1]
-    percentage = image_selection_slider / 100
-    select_frame_num = int(percentage * len(painted_images))
-    return painted_images[select_frame_num], select_frame_num
+    return painted_image, video_state
 
 def interactive_correction(video_state, point_prompt, click_state, select_correction_frame, evt: gr.SelectData):
     """
@@ -187,7 +157,7 @@ def interactive_correction(video_state, point_prompt, click_state, select_correction_frame, evt: gr.SelectData):
     # prompt for sam model
     prompt = get_prompt(click_state=click_state, click_input=coordinate)
 
-    model.samcontroler.seg_again(refine_image)
+    # model.samcontroler.seg_again(refine_image)
     corrected_mask, corrected_logit, corrected_painted_image = model.first_frame_click(
         image=refine_image,
         points=np.array(prompt["input_point"]),
@@ -196,17 +166,23 @@ def interactive_correction(video_state, point_prompt, click_state, select_correction_frame, evt: gr.SelectData):
     )
     return corrected_painted_image, [corrected_mask, corrected_logit, corrected_painted_image]
 
-def correct_track(video_state, select_correction_frame, corrected_state, masks, logits, painted_images, fps):
+def vos_tracking_video(video_state):
     model.xmem.clear_memory()
     # inference the following images
-    following_images = video_state[1][select_correction_frame:]
-    corrected_masks, corrected_logits, corrected_painted_images = model.generator(images=following_images, template_mask=corrected_state[0])
-    masks = masks[:select_correction_frame] + corrected_masks
-    logits = logits[:select_correction_frame] + corrected_logits
-    painted_images = painted_images[:select_correction_frame] + corrected_painted_images
-    video_output = generate_video_from_frames(painted_images, output_path="./output.mp4", fps=fps)
+    following_frames = video_state["origin_images"][video_state["select_frame_number"]:]
+    template_mask = video_state["masks"][video_state["select_frame_number"]]
+    fps = video_state["fps"]
+    masks, logits, painted_images = model.generator(images=following_frames, template_mask=template_mask)
+    video_state["masks"][video_state["select_frame_number"]:] = masks
+    video_state["logits"][video_state["select_frame_number"]:] = logits
+    video_state["painted_images"][video_state["select_frame_number"]:] = painted_images
+    video_output = generate_video_from_frames(video_state["painted_images"], output_path="./result/{}".format(video_state["video_name"]), fps=fps)  # use video_input to name the output video
+    return video_output, video_state
-    return video_output, painted_images, logits, masks
 
 # check and download checkpoints if needed
 SAM_checkpoint = "sam_vit_h_4b8939.pth"
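
The rewritten vos_tracking_video keeps one entry per frame in each video_state list and writes tracker output back over the tail via slice assignment, starting at the selected template frame. A minimal sketch of that pattern:

    # lengths match because the tracker returns one result per input frame
    masks = [None, None, None, None, None]
    tracked = ["m2", "m3", "m4"]  # dummy results for frames 2..4
    masks[2:] = tracked
    assert masks == [None, None, "m2", "m3", "m4"]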
@@ -219,8 +195,8 @@ xmem_checkpoint = download_checkpoint(xmem_checkpoint_url, folder, xmem_checkpoint)
 
 # args, defined in track_anything.py
 args = parse_augment()
-args.port = 12219
-args.device = "cuda:5"
+args.port = 12212
+args.device = "cuda:2"
 
 model = TrackingAnything(SAM_checkpoint, xmem_checkpoint, args)
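
The port and device overrides are machine-specific. A sketch of a more defensive variant, assuming parse_augment returns an argparse.Namespace as the assignments above imply:

    import argparse
    import torch

    args = argparse.Namespace(port=12212, device="cpu")
    if torch.cuda.is_available():
        args.device = "cuda:2"  # GPU index from the diff; adjust per host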
@@ -228,19 +204,19 @@ with gr.Blocks() as iface:
     """
         state for
     """
-    state = gr.State([])
-    play_state = gr.State([])
-    video_state = gr.State([[],[],[]])
     click_state = gr.State([[],[]])
-    logits = gr.State([])
-    masks = gr.State([])
-    painted_images = gr.State([])
-    origin_image = gr.State(None)
-    template_mask = gr.State(None)
-    select_correction_frame = gr.State(None)
-    corrected_state = gr.State([[],[],[]])
-    fps = gr.State([])
+    # queue value for image refresh, origin image, mask, logits, painted image
+    video_state = gr.State(
+        {
+            "video_name": "",
+            "origin_images": None,
+            "painted_images": None,
+            "masks": None,
+            "logits": None,
+            "select_frame_number": 0,
+            "fps": 30
+        }
+    )
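
Collapsing the dozen loose gr.State objects into one dict works because Gradio state is per-session and round-trips through every handler: it goes in as an input and must come back as an output to persist. A minimal self-contained sketch of that contract:

    import gradio as gr

    with gr.Blocks() as demo:
        state = gr.State({"count": 0})
        shown = gr.Number(label="count")
        bump = gr.Button("bump")

        def on_bump(s):
            # mutate and return the state so Gradio stores the new value
            s["count"] += 1
            return s, s["count"]

        bump.click(fn=on_bump, inputs=[state], outputs=[state, shown])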
@@ -250,9 +226,6 @@ with gr.Blocks() as iface:
         with gr.Column(scale=1.0):
             video_input = gr.Video().style(height=720)
 
-            # listen to the user action for play and pause input video
-            video_input.play(fn=play_video, inputs=play_state, outputs=play_state, scroll_to_output=True, show_progress=True)
-            video_input.pause(fn=pause_video, inputs=play_state, outputs=play_state)
 
             with gr.Row(scale=1):
@@ -275,75 +248,97 @@ with gr.Blocks() as iface:
                     clear_button_clike = gr.Button(value="Clear Clicks", interactive=True).style(height=160)
                     clear_button_image = gr.Button(value="Clear Image", interactive=True)
             template_frame = gr.Image(type="pil", interactive=True, elem_id="template_frame").style(height=360)
+            image_selection_slider = gr.Slider(minimum=1, maximum=100, step=1, value=1, label="Image Selection", invisible=False)
 
             # extract frames
             with gr.Column():
-                template_select_button = gr.Button(value="Template select", interactive=True, variant="primary")
+                extract_frames_button = gr.Button(value="Get video info", interactive=True, variant="primary")
 
         with gr.Column(scale=0.5):
             # for intermedia result check and correction
             # intermedia_image = gr.Image(type="pil", interactive=True, elem_id="intermedia_frame").style(height=360)
             video_output = gr.Video().style(height=360)
             tracking_video_predict_button = gr.Button(value="Tracking")
-            image_output = gr.Image(type="pil", interactive=True, elem_id="image_output").style(height=360)
-            image_selection_slider = gr.Slider(minimum=0, maximum=100, step=0.1, value=0, label="Image Selection", interactive=True)
-            correct_track_button = gr.Button(value="Interactive Correction")
+    # first step: get the video information
+    extract_frames_button.click(
+        fn=get_frames_from_video,
+        inputs=[
+            video_input, video_state
+        ],
+        outputs=[video_state, image_selection_slider],
+    )
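
get_frames_from_video returns a gr.update as its second value, which lands on the image_selection_slider output and reveals the slider sized to the clip in one step; a sketch of that mechanism:

    import gradio as gr

    def fake_get_video_info(n_frames=120):
        # returning gr.update(...) for an output component reconfigures it
        return gr.update(visible=True, maximum=n_frames, value=1)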
+    # second step: select images from slider
+    image_selection_slider.release(fn=select_template,
+                                   inputs=[image_selection_slider, video_state],
+                                   outputs=[template_frame, video_state], api_name="select_image")
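
Two details worth noting: the slider is 1-based for users while the frame lists are 0-based (select_template subtracts 1 before indexing), and .release fires once when the user lets go, so SAM's image embedding is not recomputed on every tick. A sketch of the index conversion:

    def slider_to_frame_index(slider_value):
        # user-facing 1-based position -> 0-based index into origin_images
        return int(slider_value) - 1

    assert slider_to_frame_index(1) == 0  # first frame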
     template_frame.select(
         fn=sam_refine,
-        inputs=[
-            origin_image, point_prompt, click_state, logits
-        ],
-        outputs=[
-            template_frame, click_state, logits, template_mask
-        ]
+        inputs=[video_state, point_prompt, click_state],
+        outputs=[template_frame, video_state]
     )
-    template_select_button.click(
-        fn=get_frames_from_video,
-        inputs=[
-            video_input,
-            play_state
-        ],
-        outputs=[video_state, template_frame, origin_image, fps],
-    )
 
     tracking_video_predict_button.click(
         fn=vos_tracking_video,
-        inputs=[video_state, template_mask, fps],
-        outputs=[video_output, painted_images, masks, logits]
-    )
-
-    image_selection_slider.release(fn=vos_tracking_image,
-                                   inputs=[image_selection_slider, painted_images], outputs=[image_output, select_correction_frame], api_name="select_image")
-
-    # correction
-    image_output.select(
-        fn=interactive_correction,
-        inputs=[video_state, point_prompt, click_state, select_correction_frame],
-        outputs=[image_output, corrected_state]
-    )
-    correct_track_button.click(
-        fn=correct_track,
-        inputs=[video_state, select_correction_frame, corrected_state, masks, logits, painted_images, fps],
-        outputs=[video_output, painted_images, logits, masks ]
+        inputs=[video_state],
+        outputs=[video_output, video_state]
     )
     # clear input
     video_input.clear(
-        lambda: ([], [], [[], [], []],
-                 None, "", "", "", "", "", "", "", [[],[]],
-                 None),
+        lambda: (
+            {
+                "origin_images": None,
+                "painted_images": None,
+                "masks": None,
+                "logits": None,
+                "select_frame_number": 0,
+                "fps": 30
+            },
+            [[],[]]
+        ),
         [],
-        [ state, play_state, video_state,
-          template_frame, video_output, image_output, origin_image, template_mask, painted_images, masks, logits, click_state,
-          select_correction_frame],
+        [
+            video_state,
+            click_state,
+        ],
         queue=False,
         show_progress=False
     )
     clear_button_image.click(
-        fn=model_reset
+        lambda: (
+            {
+                "origin_images": None,
+                "painted_images": None,
+                "masks": None,
+                "logits": None,
+                "select_frame_number": 0,
+                "fps": 30
+            },
+            [[],[]]
+        ),
+        [],
+        [
+            video_state,
+            click_state,
+        ],
+        queue=False,
+        show_progress=False
     )
     clear_button_clike.click(
         lambda: ([[],[]]),