Mirror of https://github.com/gaomingqi/Track-Anything.git, synced 2025-12-16 16:37:58 +01:00
Merge branch 'master' of https://github.com/gaomingqi/VOS-Anything
Changed file: app.py
@@ -18,6 +18,7 @@ import torch
import concurrent.futures
import queue

# download checkpoints
def download_checkpoint(url, folder, filename):
    os.makedirs(folder, exist_ok=True)
    filepath = os.path.join(folder, filename)
@@ -34,16 +35,6 @@ def download_checkpoint(url, folder, filename):

    return filepath

def pause_video(play_state):
    print("user pause_video")
    play_state.append(time.time())
    return play_state

def play_video(play_state):
    print("user play_video")
    play_state.append(time.time())
    return play_state

# convert points input to prompt state
def get_prompt(click_state, click_input):
    inputs = json.loads(click_input)
@@ -62,7 +53,8 @@ def get_prompt(click_state, click_input):
    }
    return prompt
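For reference, get_prompt's body is elided by the hunk above; judging from the call sites further down (prompt["input_point"], prompt["input_label"], prompt["multimask_output"]), the returned dict has roughly this shape. A hedged sketch with hypothetical values, not the commit's code:

prompt_example = {
    "input_point": [[320, 180]],  # [x, y] click coordinates parsed from click_input
    "input_label": [1],           # 1 = positive click, 0 = negative click
    "multimask_output": "True",   # forwarded to SAM's multimask flag (value type assumed)
}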

def get_frames_from_video(video_input, play_state):
# extract frames from upload video
def get_frames_from_video(video_input, video_state):
    """
    Args:
        video_path:str
@@ -71,10 +63,6 @@ def get_frames_from_video(video_input, play_state):
        [[0:nearest_frame], [nearest_frame:], nearest_frame]
    """
    video_path = video_input
    try:
        timestamp = play_state[1] - play_state[0]
    except:
        timestamp = 0.1
    frames = []
    try:
        cap = cv2.VideoCapture(video_path)
@@ -88,19 +76,97 @@ def get_frames_from_video(video_input, play_state):
    except (OSError, TypeError, ValueError, KeyError, SyntaxError) as e:
        print("read_frame_source:{} error. {}\n".format(video_path, str(e)))

    # for index, frame in enumerate(frames):
    #     frames[index] = np.asarray(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    # initialize video_state
    video_state = {
        "video_name": os.path.split(video_path)[-1],
        "origin_images": frames,
        "painted_images": frames.copy(),
        "masks": [None]*len(frames),
        "logits": [None]*len(frames),
        "select_frame_number": 0,
        "fps": 30
    }
    return video_state, gr.update(visible=True, maximum=len(frames), value=1)

    key_frame_index = int(timestamp * fps)
    nearest_frame = frames[key_frame_index]
    frames_split = [frames[:key_frame_index], frames[key_frame_index:], nearest_frame]
    # output_path='./seperate.mp4'
    # torchvision.io.write_video(output_path, frames[1], fps=fps, video_codec="libx264")

# get the selected frame from the gradio slider
def select_template(image_selection_slider, video_state):

    # set image in sam when selecting the template frame
    model.samcontroler.sam_controler.set_image(nearest_frame)
    return frames_split, nearest_frame, nearest_frame, fps

    # images = video_state[1]
    image_selection_slider -= 1
    video_state["select_frame_number"] = image_selection_slider

    # once a new template frame is selected, set the image in sam
    model.samcontroler.sam_controler.reset_image()
    model.samcontroler.sam_controler.set_image(video_state["origin_images"][image_selection_slider])

    return video_state["painted_images"][image_selection_slider], video_state
# use sam to get the mask
def sam_refine(video_state, point_prompt, click_state, interactive_state, evt: gr.SelectData):
    """
    Args:
        template_frame: PIL.Image
        point_prompt: flag for positive or negative button click
        click_state: [[points], [labels]]
    """
    if point_prompt == "Positive":
        coordinate = "[[{},{},1]]".format(evt.index[0], evt.index[1])
        interactive_state["positive_click_times"] += 1
    else:
        coordinate = "[[{},{},0]]".format(evt.index[0], evt.index[1])
        interactive_state["negative_click_times"] += 1

    # prompt for sam model
    prompt = get_prompt(click_state=click_state, click_input=coordinate)

    mask, logit, painted_image = model.first_frame_click(
        image=video_state["origin_images"][video_state["select_frame_number"]],
        points=np.array(prompt["input_point"]),
        labels=np.array(prompt["input_label"]),
        multimask=prompt["multimask_output"],
    )
    video_state["masks"][video_state["select_frame_number"]] = mask
    video_state["logits"][video_state["select_frame_number"]] = logit
    video_state["painted_images"][video_state["select_frame_number"]] = painted_image

    return painted_image, video_state, interactive_state
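The click is serialized as a JSON string and decoded again inside get_prompt; a minimal illustration of that round-trip (coordinates hypothetical):

import json
coordinate = "[[{},{},1]]".format(320, 180)        # positive click at pixel (320, 180)
assert json.loads(coordinate) == [[320, 180, 1]]   # get_prompt recovers [x, y, label]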

# tracking vos
def vos_tracking_video(video_state, interactive_state):
    model.xmem.clear_memory()
    following_frames = video_state["origin_images"][video_state["select_frame_number"]:]
    template_mask = video_state["masks"][video_state["select_frame_number"]]
    fps = video_state["fps"]
    masks, logits, painted_images = model.generator(images=following_frames, template_mask=template_mask)

    video_state["masks"][video_state["select_frame_number"]:] = masks
    video_state["logits"][video_state["select_frame_number"]:] = logits
    video_state["painted_images"][video_state["select_frame_number"]:] = painted_images

    video_output = generate_video_from_frames(video_state["painted_images"], output_path="./result/{}".format(video_state["video_name"]), fps=fps)  # use video_input to name the output video
    interactive_state["inference_times"] += 1

    print("For generating this tracking result, inference times: {}, click times: {}, positive: {}, negative: {}".format(
        interactive_state["inference_times"],
        interactive_state["positive_click_times"] + interactive_state["negative_click_times"],
        interactive_state["positive_click_times"],
        interactive_state["negative_click_times"]))

    #### shanggao code for mask save
    if interactive_state["mask_save"]:
        if not os.path.exists('./result/mask/{}'.format(video_state["video_name"].split('.')[0])):
            os.makedirs('./result/mask/{}'.format(video_state["video_name"].split('.')[0]))
        i = 0
        print("save mask")
        for mask in video_state["masks"]:
            np.save(os.path.join('./result/mask/{}'.format(video_state["video_name"].split('.')[0]), '{:05d}.npy'.format(i)), mask)
            i += 1
        # save_mask(video_state["masks"], video_state["video_name"])
    #### shanggao code for mask save
    return video_output, video_state, interactive_state
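The three slice assignments above splice the new results into the existing per-frame lists in place; this is plain Python list behavior, shown here with toy values:

masks = [None, None, None, None]
masks[2:] = ["m2", "m3"]                 # replace everything from the selected frame onward
assert masks == [None, None, "m2", "m3"]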

# generate video after vos inference
def generate_video_from_frames(frames, output_path, fps=30):
    """
    Generates a video from a list of frames.
@@ -110,104 +176,12 @@ def generate_video_from_frames(frames, output_path, fps=30):
        output_path (str): The path to save the generated video.
        fps (int, optional): The frame rate of the output video. Defaults to 30.
    """
    # height, width, layers = frames[0].shape
    # fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    # video = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    # for frame in frames:
    #     video.write(frame)

    # video.release()
    frames = torch.from_numpy(np.asarray(frames))
    output_path = './output.mp4'
    if not os.path.exists(os.path.dirname(output_path)):
        os.makedirs(os.path.dirname(output_path))
    torchvision.io.write_video(output_path, frames, fps=fps, video_codec="libx264")
    return output_path
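Two notes on the body above: the hard-coded output_path = './output.mp4' overrides the output_path argument passed in by callers, and torchvision.io.write_video expects the frames as a uint8 tensor shaped (T, H, W, C), which is why the list of per-frame arrays is stacked with np.asarray first. A minimal standalone sketch of the expected call (dimensions hypothetical):

import numpy as np
import torch
import torchvision

frames = torch.from_numpy(np.zeros((10, 360, 640, 3), dtype=np.uint8))  # T, H, W, C
torchvision.io.write_video("./output.mp4", frames, fps=30, video_codec="libx264")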

def model_reset():
    model.xmem.clear_memory()
    return None

def sam_refine(origin_frame, point_prompt, click_state, logit, evt: gr.SelectData):
    """
    Args:
        template_frame: PIL.Image
        point_prompt: flag for positive or negative button click
        click_state: [[points], [labels]]
    """
    if point_prompt == "Positive":
        coordinate = "[[{},{},1]]".format(evt.index[0], evt.index[1])
    else:
        coordinate = "[[{},{},0]]".format(evt.index[0], evt.index[1])

    # prompt for sam model
    prompt = get_prompt(click_state=click_state, click_input=coordinate)

    # default value
    # points = np.array([[evt.index[0],evt.index[1]]])
    # labels = np.array([1])
    if len(logit) == 0:
        logit = None

    mask, logit, painted_image = model.first_frame_click(
        image=origin_frame,
        points=np.array(prompt["input_point"]),
        labels=np.array(prompt["input_label"]),
        multimask=prompt["multimask_output"],
    )
    return painted_image, click_state, logit, mask
def vos_tracking_video(video_state, template_mask, fps):

    masks, logits, painted_images = model.generator(images=video_state[1], template_mask=template_mask)
    video_output = generate_video_from_frames(painted_images, output_path="./output.mp4", fps=fps)
    # image_selection_slider = gr.Slider(minimum=1, maximum=len(video_state[1]), value=1, label="Image Selection", interactive=True)
    return video_output, painted_images, masks, logits

def vos_tracking_image(image_selection_slider, painted_images):

    # images = video_state[1]
    percentage = image_selection_slider / 100
    select_frame_num = int(percentage * len(painted_images))
    return painted_images[select_frame_num], select_frame_num
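Note on the mapping just above: when the slider sits at 100, int(percentage * len(painted_images)) equals len(painted_images), one past the last valid index, so the lookup would raise an IndexError without a clamp such as min(select_frame_num, len(painted_images) - 1).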

def interactive_correction(video_state, point_prompt, click_state, select_correction_frame, evt: gr.SelectData):
    """
    Args:
        template_frame: PIL.Image
        point_prompt: flag for positive or negative button click
        click_state: [[points], [labels]]
    """
    refine_image = video_state[1][select_correction_frame]
    if point_prompt == "Positive":
        coordinate = "[[{},{},1]]".format(evt.index[0], evt.index[1])
    else:
        coordinate = "[[{},{},0]]".format(evt.index[0], evt.index[1])

    # prompt for sam model
    prompt = get_prompt(click_state=click_state, click_input=coordinate)
    model.samcontroler.seg_again(refine_image)
    corrected_mask, corrected_logit, corrected_painted_image = model.first_frame_click(
        image=refine_image,
        points=np.array(prompt["input_point"]),
        labels=np.array(prompt["input_label"]),
        multimask=prompt["multimask_output"],
    )
    return corrected_painted_image, [corrected_mask, corrected_logit, corrected_painted_image]

def correct_track(video_state, select_correction_frame, corrected_state, masks, logits, painted_images, fps):
    model.xmem.clear_memory()
    # run inference on the following images
    following_images = video_state[1][select_correction_frame:]
    corrected_masks, corrected_logits, corrected_painted_images = model.generator(images=following_images, template_mask=corrected_state[0])
    masks = masks[:select_correction_frame] + corrected_masks
    logits = logits[:select_correction_frame] + corrected_logits
    painted_images = painted_images[:select_correction_frame] + corrected_painted_images
    video_output = generate_video_from_frames(painted_images, output_path="./output.mp4", fps=fps)

    return video_output, painted_images, logits, masks

# check and download checkpoints if needed
SAM_checkpoint = "sam_vit_h_4b8939.pth"
sam_checkpoint_url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
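download_checkpoint's body is elided between the hunks above (only its opening lines and return filepath survive). Purely as a hedged illustration of such a helper, with the urllib body an assumption rather than the commit's code:

import os
import urllib.request

def download_checkpoint_sketch(url, folder, filename):
    os.makedirs(folder, exist_ok=True)
    filepath = os.path.join(folder, filename)
    if not os.path.exists(filepath):               # assumed: skip if already cached
        urllib.request.urlretrieve(url, filepath)  # assumed: fetch the checkpoint to disk
    return filepath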
@@ -219,8 +193,9 @@ xmem_checkpoint = download_checkpoint(xmem_checkpoint_url, folder, xmem_checkpoi

# args, defined in track_anything.py
args = parse_augment()
args.port = 12219
args.device = "cuda:5"
args.port = 12212
args.device = "cuda:4"
args.mask_save = True

model = TrackingAnything(SAM_checkpoint, xmem_checkpoint, args)
@@ -228,36 +203,40 @@ with gr.Blocks() as iface:
    """
    state for
    """
    state = gr.State([])
    play_state = gr.State([])
    video_state = gr.State([[],[],[]])
    click_state = gr.State([[],[]])
    logits = gr.State([])
    masks = gr.State([])
    painted_images = gr.State([])
    origin_image = gr.State(None)
    template_mask = gr.State(None)
    select_correction_frame = gr.State(None)
    corrected_state = gr.State([[],[],[]])
    fps = gr.State([])
    # queue value for image refresh, origin image, mask, logits, painted image

    interactive_state = gr.State({
        "inference_times": 0,
        "negative_click_times": 0,
        "positive_click_times": 0,
        "mask_save": args.mask_save
    })
    video_state = gr.State(
        {
            "video_name": "",
            "origin_images": None,
            "painted_images": None,
            "masks": None,
            "logits": None,
            "select_frame_number": 0,
            "fps": 30
        }
    )
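Each gr.State above holds a per-session value: Gradio passes the current value into an event handler listed in inputs and stores whatever the handler returns for the matching entry in outputs, which is why functions like sam_refine take video_state in and return it again. A minimal sketch of that pattern (names hypothetical):

import gradio as gr

with gr.Blocks() as demo:
    counter = gr.State(0)      # per-session value, one copy per user
    button = gr.Button("bump")
    shown = gr.Number(value=0)
    def bump(n):
        return n + 1, n + 1    # first output updates the state, second the display
    button.click(fn=bump, inputs=counter, outputs=[counter, shown])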

    with gr.Row():

        # for user video input
        with gr.Column(scale=1.0):
            video_input = gr.Video().style(height=720)
            video_input = gr.Video().style(height=360)

            # listen to the user action for play and pause of the input video
            video_input.play(fn=play_video, inputs=play_state, outputs=play_state, scroll_to_output=True, show_progress=True)
            video_input.pause(fn=pause_video, inputs=play_state, outputs=play_state)

    with gr.Row(scale=1):
        # put the template frame under the radio button
        with gr.Column(scale=0.5):
            # extract frames
            with gr.Column():
                extract_frames_button = gr.Button(value="Get video info", interactive=True, variant="primary")

            # click point settings: negative or positive, mode continuous or single
            with gr.Row():
                with gr.Row(scale=0.5):
@@ -275,75 +254,99 @@ with gr.Blocks() as iface:
                clear_button_clike = gr.Button(value="Clear Clicks", interactive=True).style(height=160)
                clear_button_image = gr.Button(value="Clear Image", interactive=True)
            template_frame = gr.Image(type="pil", interactive=True, elem_id="template_frame").style(height=360)
            with gr.Column():
                template_select_button = gr.Button(value="Template select", interactive=True, variant="primary")
                image_selection_slider = gr.Slider(minimum=1, maximum=100, step=1, value=1, label="Image Selection", invisible=False)

        with gr.Column(scale=0.5):
            # for intermediate result check and correction
            # intermedia_image = gr.Image(type="pil", interactive=True, elem_id="intermedia_frame").style(height=360)
            video_output = gr.Video().style(height=360)
            tracking_video_predict_button = gr.Button(value="Tracking")

            image_output = gr.Image(type="pil", interactive=True, elem_id="image_output").style(height=360)
            image_selection_slider = gr.Slider(minimum=0, maximum=100, step=0.1, value=0, label="Image Selection", interactive=True)
            correct_track_button = gr.Button(value="Interactive Correction")

    # first step: get the video information
    extract_frames_button.click(
        fn=get_frames_from_video,
        inputs=[
            video_input, video_state
        ],
        outputs=[video_state, image_selection_slider],
    )

    # second step: select an image from the slider
    image_selection_slider.release(fn=select_template,
                                   inputs=[image_selection_slider, video_state],
                                   outputs=[template_frame, video_state], api_name="select_image")

    template_frame.select(
        fn=sam_refine,
        inputs=[
            origin_image, point_prompt, click_state, logits
        ],
        outputs=[
            template_frame, click_state, logits, template_mask
        ]
    )

    template_select_button.click(
        fn=get_frames_from_video,
        inputs=[
            video_input,
            play_state
        ],
        outputs=[video_state, template_frame, origin_image, fps],
        inputs=[video_state, point_prompt, click_state, interactive_state],
        outputs=[template_frame, video_state, interactive_state]
    )

    tracking_video_predict_button.click(
        fn=vos_tracking_video,
        inputs=[video_state, template_mask, fps],
        outputs=[video_output, painted_images, masks, logits]
    )
    image_selection_slider.release(fn=vos_tracking_image,
                                   inputs=[image_selection_slider, painted_images], outputs=[image_output, select_correction_frame], api_name="select_image")
    # correction
    image_output.select(
        fn=interactive_correction,
        inputs=[video_state, point_prompt, click_state, select_correction_frame],
        outputs=[image_output, corrected_state]
    )
    correct_track_button.click(
        fn=correct_track,
        inputs=[video_state, select_correction_frame, corrected_state, masks, logits, painted_images, fps],
        outputs=[video_output, painted_images, logits, masks]
        inputs=[video_state, interactive_state],
        outputs=[video_output, video_state, interactive_state]
    )

    # clear input
    video_input.clear(
        lambda: ([], [], [[], [], []],
                 None, "", "", "", "", "", "", "", [[],[]],
                 None),
        lambda: (
            {
                "origin_images": None,
                "painted_images": None,
                "masks": None,
                "logits": None,
                "select_frame_number": 0,
                "fps": 30
            },
            {
                "inference_times": 0,
                "negative_click_times": 0,
                "positive_click_times": 0,
                "mask_save": args.mask_save
            },
            [[],[]]
        ),
        [],
        [state, play_state, video_state,
         template_frame, video_output, image_output, origin_image, template_mask, painted_images, masks, logits, click_state,
         select_correction_frame],
        [
            video_state,
            interactive_state,
            click_state,
        ],
        queue=False,
        show_progress=False
    )
    clear_button_image.click(
        fn=model_reset
        lambda: (
            {
                "origin_images": None,
                "painted_images": None,
                "masks": None,
                "logits": None,
                "select_frame_number": 0,
                "fps": 30
            },
            {
                "inference_times": 0,
                "negative_click_times": 0,
                "positive_click_times": 0,
                "mask_save": args.mask_save
            },
            [[],[]]
        ),
        [],
        [
            video_state,
            interactive_state,
            click_state,
        ],
        queue=False,
        show_progress=False
    )
    clear_button_clike.click(
        lambda: ([[],[]]),
@@ -62,6 +62,7 @@ def parse_augment():
    parser.add_argument('--sam_model_type', type=str, default="vit_h")
    parser.add_argument('--port', type=int, default=6080, help="only useful when running gradio applications")
    parser.add_argument('--debug', action="store_true")
    parser.add_argument('--mask_save', default=True)
    args = parser.parse_args()

    if args.debug:
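One quirk of the new --mask_save argument: with neither type= nor action= given, a value passed on the command line arrives as a string (always truthy), so only the default is a real bool. A store_true-style declaration avoids that; an alternative sketch, not the commit's code:

parser.add_argument('--mask_save', action="store_true")  # presence of the flag means True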
@@ -182,7 +182,7 @@ class MemoryManager:
        if self.enable_long_term:
            # Do memory compression if needed
            if self.work_mem.size >= self.max_work_elements:
                print('remove memory')
                # print('remove memory')
                # Remove obsolete features if needed
                if self.long_mem.size >= (self.max_long_elements - self.num_prototypes):
                    self.long_mem.remove_obsolete_features(self.max_long_elements - self.num_prototypes)
@@ -239,8 +239,8 @@ class MemoryManager:

            # add to long-term memory
            self.long_mem.add(prototype_key, prototype_value, prototype_shrinkage, selection=None, objects=None)
            print(f'long memory size: {self.long_mem.size}')
            print(f'work memory size: {self.work_mem.size}')
            # print(f'long memory size: {self.long_mem.size}')
            # print(f'work memory size: {self.work_mem.size}')

    def consolidation(self, candidate_key, candidate_shrinkage, candidate_selection, usage, candidate_value):
        # keys: 1*C*N
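(Reading of the shape note above, as an inference rather than from the commit: 1*C*N denotes a single batch of C-dimensional keys over N memory elements.)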