mirror of
https://github.com/gaomingqi/Track-Anything.git
synced 2025-12-14 15:37:50 +01:00
start app develop
This commit is contained in:
91
app.py
Normal file
91
app.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import gradio as gr
|
||||
from demo import automask_image_app, automask_video_app, sahi_autoseg_app
|
||||
import argparse
|
||||
import cv2
|
||||
import time
|
||||
def pause_video():
    """Callback for the video player's pause event: log the wall-clock time."""
    timestamp = time.time()
    print(timestamp)
|
||||
def play_video():
    """Callback for the video player's play event: log the event and its time.

    Bug fix: the original printed ``time.time`` (the built-in function object)
    instead of calling it; now it prints the wall-clock timestamp, matching
    ``pause_video``.
    """
    print("play video")
    print(time.time())
|
||||
|
||||
|
||||
# Gradio UI for SAM-based automatic video mask generation.
# Layout: a video player (with play/pause logging callbacks) above the model
# controls (model type, min area, point-sampling sliders) and a Generator button.
# Bug fix: removed the stray `gr.Video.g` expression, which raised
# AttributeError at import time.
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column(scale=1.0):
            seg_automask_video_file = gr.Video().style(height=720)
            # Log play/pause events so the paused timestamp can be captured later.
            seg_automask_video_file.play(fn=play_video)
            seg_automask_video_file.pause(fn=pause_video)

            with gr.Row():
                with gr.Column():
                    seg_automask_video_model_type = gr.Dropdown(
                        choices=[
                            "vit_h",
                            "vit_l",
                            "vit_b",
                        ],
                        value="vit_l",
                        label="Model Type",
                    )
                    seg_automask_video_min_area = gr.Number(
                        value=1000,
                        label="Min Area",
                    )

            with gr.Row():
                with gr.Column():
                    seg_automask_video_points_per_side = gr.Slider(
                        minimum=0,
                        maximum=32,
                        step=2,
                        value=16,
                        label="Points per Side",
                    )

                    seg_automask_video_points_per_batch = gr.Slider(
                        minimum=0,
                        maximum=64,
                        step=2,
                        value=64,
                        label="Points per Batch",
                    )

                    seg_automask_video_predict = gr.Button(value="Generator")

    # TODO(dev): add an output video component and a first-frame picker, then
    # wire the Generator button:
    # seg_automask_video_predict.click(
    #     fn=automask_video_app,
    #     inputs=[
    #         seg_automask_video_file,
    #         seg_automask_video_model_type,
    #         seg_automask_video_points_per_side,
    #         seg_automask_video_points_per_batch,
    #         seg_automask_video_min_area,
    #     ],
    #     outputs=[output_video],
    # )

iface.queue(concurrency_count=1)
iface.launch(debug=True, enable_queue=True, server_port=12212, server_name="0.0.0.0")
|
||||
|
||||
|
||||
|
||||
37
app_test.py
Normal file
37
app_test.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import gradio as gr
|
||||
import time
|
||||
|
||||
def capture_frame(video):
    """Return the frame of *video* at its current playback position.

    NOTE(review): relies on the player object exposing ``current_time`` and
    ``get_frame_at_sec`` — confirm against the installed gradio version.
    """
    position = video.current_time
    return video.get_frame_at_sec(position)
|
||||
|
||||
def capture_time(video):
    """Block until *video* reports being paused, then return the pause time.

    NOTE(review): this is a busy-wait with no sleep; it spins forever if the
    video is never paused.
    """
    while not video.paused:
        pass
    return video.current_time
|
||||
|
||||
# Interface that grabs the frame at the current playback position of an
# uploaded video.
# NOTE(review): server_port / server_name / capture_session are normally
# launch() options, not Interface() kwargs, and Interface has no `.video`
# attribute — verify against the installed gradio version.
iface = gr.Interface(
    fn=capture_frame,
    inputs=[gr.inputs.Video(type="mp4", label="Input video", source="upload")],
    outputs=["image"],
    server_port=12212,
    server_name="0.0.0.0",
    capture_session=True,
)

video_player = iface.video[0]
video_player.pause = False

# Interface that reports the timestamp at which the video was paused.
time_interface = gr.Interface(
    fn=capture_time,
    inputs=[gr.inputs.Video(type="mp4", label="Input video", source="upload", max_duration=10)],
    outputs=["text"],
    server_port=12212,
    server_name="0.0.0.0",
    capture_session=True,
)

time_interface.video[0].play = False
time_interface.video[0].pause = False

iface.launch()
time_interface.launch()
|
||||
87
demo.py
Normal file
87
demo.py
Normal file
@@ -0,0 +1,87 @@
|
||||
from metaseg import SegAutoMaskPredictor, SegManualMaskPredictor, SahiAutoSegmentation, sahi_sliced_predict
|
||||
|
||||
# For image
|
||||
|
||||
def automask_image_app(image_path, model_type, points_per_side, points_per_batch, min_area):
    """Run SAM automatic mask generation on one image.

    Saves the visualisation to ``output.png`` and returns that path so the
    Gradio UI can display it.
    """
    options = {
        "source": image_path,
        "model_type": model_type,  # one of: vit_l, vit_h, vit_b
        "points_per_side": points_per_side,
        "points_per_batch": points_per_batch,
        "min_area": min_area,
        "output_path": "output.png",
        "show": False,
        "save": True,
    }
    SegAutoMaskPredictor().image_predict(**options)
    return "output.png"
|
||||
|
||||
|
||||
# For video
|
||||
|
||||
def automask_video_app(video_path, model_type, points_per_side, points_per_batch, min_area):
    """Run SAM automatic mask generation on every frame of a video.

    Saves the result to ``output.mp4`` and returns that path so the Gradio UI
    can display it.
    """
    options = {
        "source": video_path,
        "model_type": model_type,  # one of: vit_l, vit_h, vit_b
        "points_per_side": points_per_side,
        "points_per_batch": points_per_batch,
        "min_area": min_area,
        "output_path": "output.mp4",
    }
    SegAutoMaskPredictor().video_predict(**options)
    return "output.mp4"
|
||||
|
||||
|
||||
# For manuel box and point selection
|
||||
|
||||
def manual_app(image_path, model_type, input_point, input_label, input_box, multimask_output, random_color):
    """Segment an image from manually supplied prompts (points and/or boxes).

    Saves the visualisation to ``output.png`` and returns that path.
    """
    options = {
        "source": image_path,
        "model_type": model_type,  # one of: vit_l, vit_h, vit_b
        "input_point": input_point,
        "input_label": input_label,
        "input_box": input_box,
        "multimask_output": multimask_output,
        "random_color": random_color,
        "output_path": "output.png",
        "show": False,
        "save": True,
    }
    SegManualMaskPredictor().image_predict(**options)
    return "output.png"
|
||||
|
||||
|
||||
# For sahi sliced prediction
|
||||
|
||||
def sahi_autoseg_app(
    image_path,
    sam_model_type,
    detection_model_type,
    detection_model_path,
    conf_th,
    image_size,
    slice_height,
    slice_width,
    overlap_height_ratio,
    overlap_width_ratio,
):
    """Detect objects with sliced (SAHI) inference, then segment each box with SAM.

    Returns the path of the saved visualisation.
    """
    # Stage 1: sliced object detection produces candidate boxes.
    detection_options = {
        "image_path": image_path,
        "detection_model_type": detection_model_type,  # yolov8, detectron2, mmdetection, torchvision
        "detection_model_path": detection_model_path,
        "conf_th": conf_th,
        "image_size": image_size,
        "slice_height": slice_height,
        "slice_width": slice_width,
        "overlap_height_ratio": overlap_height_ratio,
        "overlap_width_ratio": overlap_width_ratio,
    }
    boxes = sahi_sliced_predict(**detection_options)

    # Stage 2: SAM segments each detected box.
    SahiAutoSegmentation().predict(
        source=image_path,
        model_type=sam_model_type,
        input_box=boxes,
        multimask_output=False,
        random_color=False,
        show=False,
        save=True,
    )

    # NOTE(review): assumes SahiAutoSegmentation saves to output.png — confirm,
    # since no output_path is passed here.
    return "output.png"
|
||||
@@ -12,4 +12,4 @@ pycocotools
|
||||
matplotlib
|
||||
onnxruntime
|
||||
onnx
|
||||
|
||||
metaseg
|
||||
|
||||
BIN
test_sample/test-sample1.mp4
Normal file
BIN
test_sample/test-sample1.mp4
Normal file
Binary file not shown.
Reference in New Issue
Block a user