mirror of https://github.com/guoyww/AnimateDiff.git

Commit: fix gradio

Changed file: app.py

@@ -17,16 +17,17 @@ from transformers import CLIPTextModel, CLIPTokenizer
 from animatediff.models.unet import UNet3DConditionModel
 from animatediff.pipelines.pipeline_animation import AnimationPipeline
-from animatediff.utils.util import save_videos_grid
+from animatediff.utils.util import save_videos_grid, load_weights, auto_download, MOTION_MODULES, BACKUP_DREAMBOOTH_MODELS
 from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
 from animatediff.utils.convert_lora_safetensor_to_diffusers import convert_lora
 import pdb
 
 sample_idx = 0
 scheduler_dict = {
+    "DDIM": DDIMScheduler,
     "Euler": EulerDiscreteScheduler,
     "PNDM": PNDMScheduler,
-    "DDIM": DDIMScheduler,
 }
 
 css = """
@@ -38,142 +39,160 @@ css = """
 }
 """
 
+PRETRAINED_SD = "runwayml/stable-diffusion-v1-5"
+
+default_motion_module = "v3_sd15_mm.ckpt"
+default_inference_config = "configs/inference/inference-v3.yaml"
+default_dreambooth_model = "realisticVisionV60B1_v51VAE.safetensors"
+default_prompt = "b&w photo of 42 y.o man in black clothes, bald, face, half body, body, high detailed skin, skin pores, coastline, overcast weather, wind, waves, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3"
+default_n_prompt = "semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"
+default_seed = 8893659352891878017
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 class AnimateController:
     def __init__(self):
 
         # config dirs
         self.basedir = os.getcwd()
         self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion")
         self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module")
         self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA")
         self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S"))
         self.savedir_sample = os.path.join(self.savedir, "sample")
         os.makedirs(self.savedir, exist_ok=True)
 
-        self.stable_diffusion_list = []
-        self.motion_module_list = []
-        self.personalized_model_list = []
-
-        self.refresh_stable_diffusion()
-        self.refresh_motion_module()
-        self.refresh_personalized_model()
+        self.stable_diffusion_list = [PRETRAINED_SD]
+        self.motion_module_list = MOTION_MODULES
+        self.personalized_model_list = BACKUP_DREAMBOOTH_MODELS
 
         # config models
         self.tokenizer = None
         self.text_encoder = None
         self.vae = None
         self.unet = None
         self.pipeline = None
-        self.lora_model_state_dict = {}
+        # self.lora_model_state_dict = {}
 
-        self.inference_config = OmegaConf.load("configs/inference/inference.yaml")
+        self.refresh_stable_diffusion()
+        self.refresh_personalized_model()
+
+        # default setting
+        self.update_pipeline(
+            stable_diffusion_dropdown=PRETRAINED_SD,
+            motion_module_dropdown=default_motion_module,
+            base_model_dropdown=default_dreambooth_model,
+            sampler_dropdown="DDIM",
+        )
 
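Note: the constructor now ends by calling update_pipeline with the v3 defaults, so model downloads and weight loading happen once at startup rather than on the first dropdown interaction. A sketch of the surrounding entry point under that assumption (the launch code itself is outside this diff):

    # Hypothetical entry point, not shown in this hunk; ui() is defined
    # later in app.py and reads the module-level controller.
    controller = AnimateController()   # triggers update_pipeline(...) above

    if __name__ == "__main__":
        demo = ui()        # builds the Blocks layout around the controller
        demo.launch()      # assumed standard Gradio launch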
     def refresh_stable_diffusion(self):
-        self.stable_diffusion_list = glob(os.path.join(self.stable_diffusion_dir, "*/"))
-
-    def refresh_motion_module(self):
-        motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt"))
-        self.motion_module_list = [os.path.basename(p) for p in motion_module_list]
+        self.stable_diffusion_list = [PRETRAINED_SD] + glob(os.path.join(self.stable_diffusion_dir, "*/"))
 
     def refresh_personalized_model(self):
         personalized_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors"))
-        self.personalized_model_list = [os.path.basename(p) for p in personalized_model_list]
+        self.personalized_model_list = BACKUP_DREAMBOOTH_MODELS + [os.path.basename(p) for p in personalized_model_list if os.path.basename(p) not in BACKUP_DREAMBOOTH_MODELS]
 
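Note: refresh_personalized_model now merges the bundled BACKUP_DREAMBOOTH_MODELS with whatever .safetensors files exist locally, skipping names already in the backup list. A standalone sketch of the same merge pattern (paths and the example entry are illustrative):

    import os
    from glob import glob

    BACKUP_DREAMBOOTH_MODELS = ["realisticVisionV60B1_v51VAE.safetensors"]  # example entry

    local = [os.path.basename(p) for p in glob("models/DreamBooth_LoRA/*.safetensors")]
    # dict.fromkeys keeps first-seen order, so bundled models stay at the
    # top of the dropdown and local duplicates are dropped.
    merged = list(dict.fromkeys(BACKUP_DREAMBOOTH_MODELS + local))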
-    def update_stable_diffusion(self, stable_diffusion_dropdown):
-        self.tokenizer = CLIPTokenizer.from_pretrained(stable_diffusion_dropdown, subfolder="tokenizer")
-        self.text_encoder = CLIPTextModel.from_pretrained(stable_diffusion_dropdown, subfolder="text_encoder").cuda()
-        self.vae = AutoencoderKL.from_pretrained(stable_diffusion_dropdown, subfolder="vae").cuda()
-        self.unet = UNet3DConditionModel.from_pretrained_2d(stable_diffusion_dropdown, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
-        return gr.Dropdown.update()
-
-    def update_motion_module(self, motion_module_dropdown):
-        if self.unet is None:
-            gr.Info(f"Please select a pretrained model path.")
-            return gr.Dropdown.update(value=None)
-        else:
-            motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown)
-            motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu")
-            missing, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False)
-            assert len(unexpected) == 0
-            return gr.Dropdown.update()
-
-    def update_base_model(self, base_model_dropdown):
-        if self.unet is None:
-            gr.Info(f"Please select a pretrained model path.")
-            return gr.Dropdown.update(value=None)
-        else:
-            base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown)
-            base_model_state_dict = {}
-            with safe_open(base_model_dropdown, framework="pt", device="cpu") as f:
-                for key in f.keys():
-                    base_model_state_dict[key] = f.get_tensor(key)
-
-            converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config)
-            self.vae.load_state_dict(converted_vae_checkpoint)
-
-            converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config)
-            self.unet.load_state_dict(converted_unet_checkpoint, strict=False)
-
-            self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict)
-            return gr.Dropdown.update()
-
-    def update_lora_model(self, lora_model_dropdown):
-        lora_model_dropdown = os.path.join(self.personalized_model_dir, lora_model_dropdown)
-        self.lora_model_state_dict = {}
-        if lora_model_dropdown == "none": pass
-        else:
-            with safe_open(lora_model_dropdown, framework="pt", device="cpu") as f:
-                for key in f.keys():
-                    self.lora_model_state_dict[key] = f.get_tensor(key)
-        return gr.Dropdown.update()
 
-    def animate(
+    # for dropdown update
+    def update_pipeline(
         self,
         stable_diffusion_dropdown,
         motion_module_dropdown,
-        base_model_dropdown,
-        lora_alpha_slider,
-        prompt_textbox,
-        negative_prompt_textbox,
-        sampler_dropdown,
-        sample_step_slider,
-        width_slider,
-        length_slider,
-        height_slider,
-        cfg_scale_slider,
-        seed_textbox
-    ):
-        if self.unet is None:
-            raise gr.Error(f"Please select a pretrained model path.")
-        if motion_module_dropdown == "":
-            raise gr.Error(f"Please select a motion module.")
-        if base_model_dropdown == "":
-            raise gr.Error(f"Please select a base DreamBooth model.")
+        base_model_dropdown="",
+        lora_model_dropdown="none",
+        lora_alpha_dropdown="0.6",
+        sampler_dropdown="DDIM",
+    ):
+        if "v2" in motion_module_dropdown:
+            inference_config = "configs/inference/inference-v2.yaml"
+        elif "v3" in motion_module_dropdown:
+            inference_config = "configs/inference/inference-v3.yaml"
+        else:
+            inference_config = "configs/inference/inference-v1.yaml"
 
-        if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention()
+        unet = UNet3DConditionModel.from_pretrained_2d(
+            stable_diffusion_dropdown, subfolder="unet",
+            unet_additional_kwargs=OmegaConf.load(inference_config).unet_additional_kwargs
+        )
+        if is_xformers_available() and torch.cuda.is_available():
+            unet.enable_xformers_memory_efficient_attention()
+
+        noise_scheduler_cls = scheduler_dict[sampler_dropdown]
+        noise_scheduler_kwargs = OmegaConf.load(inference_config).noise_scheduler_kwargs
+        if noise_scheduler_cls == EulerDiscreteScheduler:
+            noise_scheduler_kwargs.pop("steps_offset")
+            noise_scheduler_kwargs.pop("clip_sample")
+        elif noise_scheduler_cls == PNDMScheduler:
+            noise_scheduler_kwargs.pop("clip_sample")
 
         pipeline = AnimationPipeline(
-            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet,
-            scheduler=scheduler_dict[sampler_dropdown](**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs))
-        ).to("cuda")
-
-        if self.lora_model_state_dict != {}:
-            pipeline = convert_lora(pipeline, self.lora_model_state_dict, alpha=lora_alpha_slider)
-        pipeline.to("cuda")
+            unet=unet,
+            vae=AutoencoderKL.from_pretrained(stable_diffusion_dropdown, subfolder="vae"),
+            text_encoder=CLIPTextModel.from_pretrained(stable_diffusion_dropdown, subfolder="text_encoder"),
+            tokenizer=CLIPTokenizer.from_pretrained(stable_diffusion_dropdown, subfolder="tokenizer"),
+            scheduler=noise_scheduler_cls(**noise_scheduler_kwargs),
+        )
+        pipeline = load_weights(
+            pipeline,
+            motion_module_path=os.path.join(self.motion_module_dir, motion_module_dropdown),
+            dreambooth_model_path=os.path.join(self.personalized_model_dir, base_model_dropdown) if base_model_dropdown != "" else "",
+            lora_model_path=os.path.join(self.personalized_model_dir, lora_model_dropdown) if lora_model_dropdown != "none" else "",
+            lora_alpha=float(lora_alpha_dropdown),
+        )
 
-        if seed_textbox != -1 and seed_textbox != "": torch.manual_seed(int(seed_textbox))
-        else: torch.seed()
+        pipeline.to(device)
+        self.pipeline = pipeline
+        print("done.")
+
+        return gr.Dropdown.update()
 
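Note: the pop() calls above exist because the shared noise_scheduler_kwargs config carries DDIM-oriented fields (clip_sample, steps_offset) that the Euler and PNDM constructors in the pinned diffusers version reject. A more general sketch (not what the commit does) filters the config against each scheduler's constructor signature:

    import inspect
    from diffusers import EulerDiscreteScheduler

    def filtered_scheduler_kwargs(scheduler_cls, kwargs):
        # Keep only the keys the scheduler's __init__ actually declares.
        accepted = set(inspect.signature(scheduler_cls.__init__).parameters)
        return {k: v for k, v in kwargs.items() if k in accepted}

    # "clip_sample" is silently dropped for schedulers that lack it.
    scheduler = EulerDiscreteScheduler(**filtered_scheduler_kwargs(
        EulerDiscreteScheduler,
        {"beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False},
    ))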
+    def update_pipeline_alpha(
+        self,
+        stable_diffusion_dropdown,
+        motion_module_dropdown,
+        base_model_dropdown="",
+        lora_model_dropdown="none",
+        lora_alpha_dropdown="0.6",
+        sampler_dropdown="DDIM",
+    ):
+        if lora_model_dropdown == "none":
+            return gr.Slider.update()
+
+        self.update_pipeline(
+            stable_diffusion_dropdown=stable_diffusion_dropdown,
+            motion_module_dropdown=motion_module_dropdown,
+            base_model_dropdown=base_model_dropdown,
+            lora_model_dropdown=lora_model_dropdown,
+            lora_alpha_dropdown=lora_alpha_dropdown,
+            sampler_dropdown=sampler_dropdown,
+        )
+
+        return gr.Slider.update()
 
+    @torch.no_grad()
+    def animate(
+        self,
+        prompt_textbox,
+        negative_prompt_textbox,
+        sampler_dropdown,
+        sample_step_slider,
+        width_slider,
+        length_slider,
+        height_slider,
+        cfg_scale_slider,
+        seed_textbox,
+    ):
+        if int(seed_textbox) != -1:
+            torch.manual_seed(int(seed_textbox))
+        else:
+            torch.seed()
+        seed = torch.initial_seed()
 
-        sample = pipeline(
+        sample = self.pipeline(
             prompt_textbox,
             negative_prompt = negative_prompt_textbox,
             num_inference_steps = sample_step_slider,
             guidance_scale = cfg_scale_slider,
             width = width_slider,
             height = height_slider,
             video_length = length_slider,
         ).videos
 
         save_sample_path = os.path.join(self.savedir_sample, f"{sample_idx}.mp4")
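Note: animate() seeds torch explicitly and then reads back torch.initial_seed(), so the seed that gets logged is the one actually used even when the user asked for a random one (-1). The same pattern in isolation:

    import torch

    def resolve_seed(requested: int) -> int:
        if requested != -1:
            torch.manual_seed(requested)   # honor the user's seed
        else:
            torch.seed()                   # re-seed from OS entropy
        return torch.initial_seed()        # whatever the RNG ended up with

    seed = resolve_seed(-1)
    print(f"sampling with seed {seed}")    # record it for reproducibility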
@@ -190,6 +209,7 @@ class AnimateController:
             "video_length": length_slider,
+            "seed": seed
         }
 
         json_str = json.dumps(sample_config, indent=4)
         with open(os.path.join(self.savedir, "logs.json"), "a") as f:
             f.write(json_str)
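Note: because the log is opened in append mode and json.dumps output is written back to back, logs.json accumulates concatenated JSON objects and is not parseable as a single document. A common alternative, sketched here rather than taken from the commit, is JSON Lines (one compact record per line):

    import json

    def append_log(path, record):
        with open(path, "a") as f:
            f.write(json.dumps(record) + "\n")  # one object per line

    # Replay the log with one json.loads() per line.
    append_log("logs.jsonl", {"prompt": "a photo", "seed": 42})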
@@ -205,51 +225,39 @@ def ui():
     with gr.Blocks(css=css) as demo:
         gr.Markdown(
             """
-            # [AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning](https://arxiv.org/abs/2307.04725)
-            Yuwei Guo, Ceyuan Yang*, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, Bo Dai (*Corresponding Author)<br>
-            [Arxiv Report](https://arxiv.org/abs/2307.04725) | [Project Page](https://animatediff.github.io/) | [Github](https://github.com/guoyww/animatediff/)
+            # AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning
+            Yuwei Guo, Ceyuan Yang✝, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, Bo Dai (✝Corresponding Author)<br>
+            [Paper](https://arxiv.org/abs/2307.04725) | [Webpage](https://animatediff.github.io/) | [Github](https://github.com/guoyww/animatediff/)
             """
         )
         with gr.Column(variant="panel"):
             gr.Markdown(
                 """
-                ### 1. Model checkpoints (select pretrained model path first).
+                ### 1. Model Checkpoints
                 """
             )
             with gr.Row():
                 stable_diffusion_dropdown = gr.Dropdown(
                     label="Pretrained Model Path",
                     choices=controller.stable_diffusion_list,
+                    value=PRETRAINED_SD,
                     interactive=True,
                 )
-                stable_diffusion_dropdown.change(fn=controller.update_stable_diffusion, inputs=[stable_diffusion_dropdown], outputs=[stable_diffusion_dropdown])
 
                 stable_diffusion_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
                 def update_stable_diffusion():
                     controller.refresh_stable_diffusion()
                     return gr.Dropdown.update(choices=controller.stable_diffusion_list)
                 stable_diffusion_refresh_button.click(fn=update_stable_diffusion, inputs=[], outputs=[stable_diffusion_dropdown])
 
             with gr.Row():
                 motion_module_dropdown = gr.Dropdown(
                     label="Select motion module",
                     choices=controller.motion_module_list,
+                    value=default_motion_module,
                     interactive=True,
                 )
-                motion_module_dropdown.change(fn=controller.update_motion_module, inputs=[motion_module_dropdown], outputs=[motion_module_dropdown])
-
-                motion_module_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
-                def update_motion_module():
-                    controller.refresh_motion_module()
-                    return gr.Dropdown.update(choices=controller.motion_module_list)
-                motion_module_refresh_button.click(fn=update_motion_module, inputs=[], outputs=[motion_module_dropdown])
 
                 base_model_dropdown = gr.Dropdown(
                     label="Select base Dreambooth model (required)",
                     choices=controller.personalized_model_list,
+                    value=default_dreambooth_model,
                     interactive=True,
                 )
-                base_model_dropdown.change(fn=controller.update_base_model, inputs=[base_model_dropdown], outputs=[base_model_dropdown])
 
                 lora_model_dropdown = gr.Dropdown(
                     label="Select LoRA model (optional)",
@@ -257,18 +265,24 @@ def ui():
                     value="none",
                     interactive=True,
                 )
-                lora_model_dropdown.change(fn=controller.update_lora_model, inputs=[lora_model_dropdown], outputs=[lora_model_dropdown])
 
-                lora_alpha_slider = gr.Slider(label="LoRA alpha", value=0.8, minimum=0, maximum=2, interactive=True)
+                lora_alpha_dropdown = gr.Dropdown(
+                    label="LoRA alpha",
+                    choices=["0.", "0.2", "0.4", "0.6", "0.8", "1.0"],
+                    value="0.6",
+                    interactive=True,
+                )
 
                 personalized_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
                 def update_personalized_model():
+                    controller.refresh_stable_diffusion()
                     controller.refresh_personalized_model()
                     return [
+                        gr.Dropdown.update(choices=controller.stable_diffusion_list),
                         gr.Dropdown.update(choices=controller.personalized_model_list),
                         gr.Dropdown.update(choices=["none"] + controller.personalized_model_list)
                     ]
-                personalized_refresh_button.click(fn=update_personalized_model, inputs=[], outputs=[base_model_dropdown, lora_model_dropdown])
+                personalized_refresh_button.click(fn=update_personalized_model, inputs=[], outputs=[stable_diffusion_dropdown, base_model_dropdown, lora_model_dropdown])
 
         with gr.Column(variant="panel"):
             gr.Markdown(
@@ -276,37 +290,39 @@ def ui():
                 ### 2. Configs for AnimateDiff.
                 """
             )
 
-            prompt_textbox = gr.Textbox(label="Prompt", lines=2)
-            negative_prompt_textbox = gr.Textbox(label="Negative prompt", lines=2)
+            prompt_textbox = gr.Textbox(label="Prompt", lines=2, value=default_prompt)
+            negative_prompt_textbox = gr.Textbox(label="Negative prompt", lines=2, value=default_n_prompt)
 
             with gr.Row().style(equal_height=False):
                 with gr.Column():
                     with gr.Row():
                         sampler_dropdown = gr.Dropdown(label="Sampling method", choices=list(scheduler_dict.keys()), value=list(scheduler_dict.keys())[0])
                         sample_step_slider = gr.Slider(label="Sampling steps", value=25, minimum=10, maximum=100, step=1)
 
                     width_slider = gr.Slider(label="Width", value=512, minimum=256, maximum=1024, step=64)
                     height_slider = gr.Slider(label="Height", value=512, minimum=256, maximum=1024, step=64)
-                    length_slider = gr.Slider(label="Animation length", value=16, minimum=8, maximum=24, step=1)
-                    cfg_scale_slider = gr.Slider(label="CFG Scale", value=7.5, minimum=0, maximum=20)
+                    length_slider = gr.Slider(label="Animation length (default: 16)", value=16, minimum=8, maximum=24, step=1)
+                    cfg_scale_slider = gr.Slider(label="CFG Scale", value=8.0, minimum=0, maximum=20)
 
                     with gr.Row():
-                        seed_textbox = gr.Textbox(label="Seed", value=-1)
+                        seed_textbox = gr.Textbox(label="Seed (-1 for random seed)", value=default_seed)
                         seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
                         seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e8)), inputs=[], outputs=[seed_textbox])
 
                     generate_button = gr.Button(value="Generate", variant='primary')
 
                 result_video = gr.Video(label="Generated Animation", interactive=False)
 
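Note: random.randint(1, 1e8) passes a float upper bound; float bounds were deprecated in Python 3.10 and removed in 3.12, so on current interpreters the dice button raises until the bound is cast:

    import random

    new_seed = random.randint(1, int(1e8))  # int bound keeps it portable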
+            # update method
+            stable_diffusion_dropdown.change(fn=controller.update_pipeline, inputs=[stable_diffusion_dropdown, motion_module_dropdown, base_model_dropdown, lora_model_dropdown, lora_alpha_dropdown, sampler_dropdown], outputs=[stable_diffusion_dropdown])
+            motion_module_dropdown.change(fn=controller.update_pipeline, inputs=[stable_diffusion_dropdown, motion_module_dropdown, base_model_dropdown, lora_model_dropdown, lora_alpha_dropdown, sampler_dropdown], outputs=[motion_module_dropdown])
+            base_model_dropdown.change(fn=controller.update_pipeline, inputs=[stable_diffusion_dropdown, motion_module_dropdown, base_model_dropdown, lora_model_dropdown, lora_alpha_dropdown, sampler_dropdown], outputs=[base_model_dropdown])
+            lora_model_dropdown.change(fn=controller.update_pipeline, inputs=[stable_diffusion_dropdown, motion_module_dropdown, base_model_dropdown, lora_model_dropdown, lora_alpha_dropdown, sampler_dropdown], outputs=[lora_model_dropdown])
+            lora_alpha_dropdown.change(fn=controller.update_pipeline_alpha, inputs=[stable_diffusion_dropdown, motion_module_dropdown, base_model_dropdown, lora_model_dropdown, lora_alpha_dropdown, sampler_dropdown], outputs=[lora_alpha_dropdown])
 
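Note: the five handlers above share one pattern: every widget passes the complete set of selection values to controller.update_pipeline, so changing any single dropdown rebuilds a pipeline consistent with all of them. A minimal self-contained sketch of this Gradio 3.x wiring style (component names are illustrative):

    import gradio as gr

    def rebuild(model_name, sampler_name):
        # Stand-in for controller.update_pipeline: receives values in
        # `inputs` order; returning update() leaves the widget value as-is.
        print(f"rebuilding with {model_name} / {sampler_name}")
        return gr.Dropdown.update()

    with gr.Blocks() as demo:
        model = gr.Dropdown(choices=["a", "b"], value="a", label="Model")
        sampler = gr.Dropdown(choices=["DDIM", "Euler"], value="DDIM", label="Sampler")
        # Each .change sends *all* relevant values, not just the changed one.
        model.change(fn=rebuild, inputs=[model, sampler], outputs=[model])
        sampler.change(fn=rebuild, inputs=[model, sampler], outputs=[sampler])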
         generate_button.click(
             fn=controller.animate,
             inputs=[
-                stable_diffusion_dropdown,
-                motion_module_dropdown,
-                base_model_dropdown,
-                lora_alpha_slider,
                 prompt_textbox,
                 negative_prompt_textbox,
                 sampler_dropdown,