mirror of https://github.com/guoyww/AnimateDiff.git
synced 2026-04-03 09:46:36 +02:00

Commit: update
app.py (382 changed lines)
@@ -1,17 +1,15 @@
 
 import os
-import json
 import torch
 import random
 
 import gradio as gr
 from glob import glob
 from omegaconf import OmegaConf
-from datetime import datetime
 from safetensors import safe_open
 
 from diffusers import AutoencoderKL
-from diffusers import DDIMScheduler, EulerDiscreteScheduler, PNDMScheduler
+from diffusers import EulerDiscreteScheduler, DDIMScheduler
 from diffusers.utils.import_utils import is_xformers_available
 from transformers import CLIPTextModel, CLIPTokenizer
 
@@ -19,15 +17,10 @@ from animatediff.models.unet import UNet3DConditionModel
 from animatediff.pipelines.pipeline_animation import AnimationPipeline
 from animatediff.utils.util import save_videos_grid
 from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
-from animatediff.utils.convert_lora_safetensor_to_diffusers import convert_lora
 
 
-sample_idx = 0
-scheduler_dict = {
-    "Euler": EulerDiscreteScheduler,
-    "PNDM": PNDMScheduler,
-    "DDIM": DDIMScheduler,
-}
+pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5"
+inference_config_path = "configs/inference/inference.yaml"
 
 css = """
 .toolbutton {
@@ -38,6 +31,49 @@ css = """
 }
 """
 
+examples = [
+    # 1-ToonYou
+    [
+        "toonyou_beta3.safetensors",
+        "mm_sd_v14.ckpt",
+        "masterpiece, best quality, 1girl, solo, cherry blossoms, hanami, pink flower, white flower, spring season, wisteria, petals, flower, plum blossoms, outdoors, falling petals, white hair, black eyes",
+        "worst quality, low quality, nsfw, logo",
+        512, 512, "13204175718326964000"
+    ],
+    # 2-Lyriel
+    [
+        "lyriel_v16.safetensors",
+        "mm_sd_v15.ckpt",
+        "A forbidden castle high up in the mountains, pixel art, intricate details2, hdr, intricate details, hyperdetailed5, natural skin texture, hyperrealism, soft light, sharp, game art, key visual, surreal",
+        "3d, cartoon, anime, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, bad anatomy, girl, loli, young, large breasts, red eyes, muscular",
+        512, 512, "6681501646976930000"
+    ],
+    # 3-RCNZ
+    [
+        "rcnzCartoon3d_v10.safetensors",
+        "mm_sd_v14.ckpt",
+        "Jane Eyre with headphones, natural skin texture,4mm,k textures, soft cinematic light, adobe lightroom, photolab, hdr, intricate, elegant, highly detailed, sharp focus, cinematic look, soothing tones, insane details, intricate details, hyperdetailed, low contrast, soft cinematic light, dim colors, exposure blend, hdr, faded",
+        "deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+        512, 512, "2416282124261060"
+    ],
+    # 4-MajicMix
+    [
+        "majicmixRealistic_v5Preview.safetensors",
+        "mm_sd_v14.ckpt",
+        "1girl, offshoulder, light smile, shiny skin best quality, masterpiece, photorealistic",
+        "bad hand, worst quality, low quality, normal quality, lowres, bad anatomy, bad hands, watermark, moles",
+        512, 512, "7132772652786303"
+    ],
+    # 5-RealisticVision
+    [
+        "realisticVisionV20_v20.safetensors",
+        "mm_sd_v15.ckpt",
+        "photo of coastline, rocks, storm weather, wind, waves, lightning, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3",
+        "blur, haze, deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers, deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation",
+        512, 512, "1490157606650685400"
+    ]
+]
+
 class AnimateController:
     def __init__(self):
 
@@ -46,156 +82,120 @@ class AnimateController:
         self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion")
         self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module")
         self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA")
-        self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S"))
-        self.savedir_sample = os.path.join(self.savedir, "sample")
+        self.savedir = os.path.join(self.basedir, "samples")
         os.makedirs(self.savedir, exist_ok=True)
 
-        self.stable_diffusion_list = []
+        self.base_model_list = []
         self.motion_module_list = []
-        self.personalized_model_list = []
+        self.selected_base_model = None
+        self.selected_motion_module = None
 
-        self.refresh_stable_diffusion()
         self.refresh_motion_module()
         self.refresh_personalized_model()
 
         # config models
-        self.tokenizer = None
-        self.text_encoder = None
-        self.vae = None
-        self.unet = None
-        self.pipeline = None
-        self.lora_model_state_dict = {}
-
-        self.inference_config = OmegaConf.load("configs/inference/inference.yaml")
+        self.inference_config = OmegaConf.load(inference_config_path)
+
+        self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
+        self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda()
+        self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
+        self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
+
+        self.update_base_model(self.base_model_list[0])
+        self.update_motion_module(self.motion_module_list[0])
 
-    def refresh_stable_diffusion(self):
-        self.stable_diffusion_list = glob(os.path.join(self.stable_diffusion_dir, "*/"))
-
     def refresh_motion_module(self):
         motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt"))
         self.motion_module_list = [os.path.basename(p) for p in motion_module_list]
 
     def refresh_personalized_model(self):
-        personalized_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors"))
-        self.personalized_model_list = [os.path.basename(p) for p in personalized_model_list]
+        base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors"))
+        self.base_model_list = [os.path.basename(p) for p in base_model_list]
 
-    def update_stable_diffusion(self, stable_diffusion_dropdown):
-        self.tokenizer = CLIPTokenizer.from_pretrained(stable_diffusion_dropdown, subfolder="tokenizer")
-        self.text_encoder = CLIPTextModel.from_pretrained(stable_diffusion_dropdown, subfolder="text_encoder").cuda()
-        self.vae = AutoencoderKL.from_pretrained(stable_diffusion_dropdown, subfolder="vae").cuda()
-        self.unet = UNet3DConditionModel.from_pretrained_2d(stable_diffusion_dropdown, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
+    def update_base_model(self, base_model_dropdown):
+        self.selected_base_model = base_model_dropdown
+
+        base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown)
+        base_model_state_dict = {}
+        with safe_open(base_model_dropdown, framework="pt", device="cpu") as f:
+            for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key)
+
+        converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config)
+        self.vae.load_state_dict(converted_vae_checkpoint)
+
+        converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config)
+        self.unet.load_state_dict(converted_unet_checkpoint, strict=False)
+
+        self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict)
         return gr.Dropdown.update()
 
     def update_motion_module(self, motion_module_dropdown):
-        if self.unet is None:
-            gr.Info(f"Please select a pretrained model path.")
-            return gr.Dropdown.update(value=None)
-        else:
-            motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown)
-            motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu")
-            missing, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False)
-            assert len(unexpected) == 0
-            return gr.Dropdown.update()
-
-    def update_base_model(self, base_model_dropdown):
-        if self.unet is None:
-            gr.Info(f"Please select a pretrained model path.")
-            return gr.Dropdown.update(value=None)
-        else:
-            base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown)
-            base_model_state_dict = {}
-            with safe_open(base_model_dropdown, framework="pt", device="cpu") as f:
-                for key in f.keys():
-                    base_model_state_dict[key] = f.get_tensor(key)
-
-            converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config)
-            self.vae.load_state_dict(converted_vae_checkpoint)
-
-            converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config)
-            self.unet.load_state_dict(converted_unet_checkpoint, strict=False)
-
-            self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict)
-            return gr.Dropdown.update()
-
-    def update_lora_model(self, lora_model_dropdown):
-        lora_model_dropdown = os.path.join(self.personalized_model_dir, lora_model_dropdown)
-        self.lora_model_state_dict = {}
-        if lora_model_dropdown == "none": pass
-        else:
-            with safe_open(lora_model_dropdown, framework="pt", device="cpu") as f:
-                for key in f.keys():
-                    self.lora_model_state_dict[key] = f.get_tensor(key)
+        self.selected_motion_module = motion_module_dropdown
+
+        motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown)
+        motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu")
+        _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False)
+        assert len(unexpected) == 0
         return gr.Dropdown.update()
 
     def animate(
         self,
-        stable_diffusion_dropdown,
-        motion_module_dropdown,
         base_model_dropdown,
-        lora_alpha_slider,
+        motion_module_dropdown,
         prompt_textbox,
         negative_prompt_textbox,
-        sampler_dropdown,
-        sample_step_slider,
         width_slider,
-        length_slider,
         height_slider,
-        cfg_scale_slider,
-        seed_textbox
+        seed_textbox,
     ):
-        if self.unet is None:
-            raise gr.Error(f"Please select a pretrained model path.")
-        if motion_module_dropdown == "":
-            raise gr.Error(f"Please select a motion module.")
-        if base_model_dropdown == "":
-            raise gr.Error(f"Please select a base DreamBooth model.")
+        if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
+        if self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown)
 
         if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention()
 
         pipeline = AnimationPipeline(
             vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet,
-            scheduler=scheduler_dict[sampler_dropdown](**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs))
+            scheduler=DDIMScheduler(**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs))
         ).to("cuda")
 
-        if self.lora_model_state_dict != {}:
-            pipeline = convert_lora(pipeline, self.lora_model_state_dict, alpha=lora_alpha_slider)
-
-        pipeline.to("cuda")
-
-        if seed_textbox != -1 and seed_textbox != "": torch.manual_seed(int(seed_textbox))
-        else: torch.seed()
-        seed = torch.initial_seed()
+        if int(seed_textbox) > 0: seed = int(seed_textbox)
+        else: seed = random.randint(1, 1e16)
+        torch.manual_seed(int(seed))
+
+        assert seed == torch.initial_seed()
+        print(f"### seed: {seed}")
+
+        generator = torch.Generator(device="cuda")
+        generator.manual_seed(seed)
 
         sample = pipeline(
             prompt_textbox,
             negative_prompt = negative_prompt_textbox,
-            num_inference_steps = sample_step_slider,
-            guidance_scale = cfg_scale_slider,
+            num_inference_steps = 25,
+            guidance_scale = 8.,
             width = width_slider,
             height = height_slider,
-            video_length = length_slider,
+            video_length = 16,
+            generator = generator,
         ).videos
 
-        save_sample_path = os.path.join(self.savedir_sample, f"{sample_idx}.mp4")
+        save_sample_path = os.path.join(self.savedir, f"sample.mp4")
         save_videos_grid(sample, save_sample_path)
 
-        sample_config = {
+        json_config = {
             "prompt": prompt_textbox,
             "n_prompt": negative_prompt_textbox,
-            "sampler": sampler_dropdown,
-            "num_inference_steps": sample_step_slider,
-            "guidance_scale": cfg_scale_slider,
             "width": width_slider,
             "height": height_slider,
-            "video_length": length_slider,
-            "seed": seed
+            "seed": seed,
+            "base_model": base_model_dropdown,
+            "motion_module": motion_module_dropdown,
         }
-        json_str = json.dumps(sample_config, indent=4)
-        with open(os.path.join(self.savedir, "logs.json"), "a") as f:
-            f.write(json_str)
-            f.write("\n\n")
-
-        return gr.Video.update(value=save_sample_path)
+        return gr.Video.update(value=save_sample_path), gr.Json.update(value=json_config)
 
 
 controller = AnimateController()
@@ -205,124 +205,62 @@ def ui():
     with gr.Blocks(css=css) as demo:
         gr.Markdown(
             """
-            # [AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning](https://arxiv.org/abs/2307.04725)
+            # AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning
             Yuwei Guo, Ceyuan Yang*, Anyi Rao, Yaohui Wang, Yu Qiao, Dahua Lin, Bo Dai (*Corresponding Author)<br>
             [Arxiv Report](https://arxiv.org/abs/2307.04725) | [Project Page](https://animatediff.github.io/) | [Github](https://github.com/guoyww/animatediff/)
             """
         )
-        with gr.Column(variant="panel"):
-            gr.Markdown(
-                """
-                ### 1. Model checkpoints (select pretrained model path first).
-                """
-            )
-            with gr.Row():
-                stable_diffusion_dropdown = gr.Dropdown(
-                    label="Pretrained Model Path",
-                    choices=controller.stable_diffusion_list,
-                    interactive=True,
-                )
-                stable_diffusion_dropdown.change(fn=controller.update_stable_diffusion, inputs=[stable_diffusion_dropdown], outputs=[stable_diffusion_dropdown])
-
-                stable_diffusion_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
-                def update_stable_diffusion():
-                    controller.refresh_stable_diffusion()
-                    return gr.Dropdown.update(choices=controller.stable_diffusion_list)
-                stable_diffusion_refresh_button.click(fn=update_stable_diffusion, inputs=[], outputs=[stable_diffusion_dropdown])
-
-            with gr.Row():
-                motion_module_dropdown = gr.Dropdown(
-                    label="Select motion module",
-                    choices=controller.motion_module_list,
-                    interactive=True,
-                )
+        gr.Markdown(
+            """
+            ### Quick Start
+            1. Select desired `Base DreamBooth Model`.
+            2. Select `Motion Module` from `mm_sd_v14.ckpt` and `mm_sd_v15.ckpt`. We recommend trying both of them for the best results.
+            3. Provide `Prompt` and `Negative Prompt` for each model. You are encouraged to refer to each model's webpage on CivitAI to learn how to write prompts for them. Below are the DreamBooth models in this demo. Click to visit their homepage.
+                - [`toonyou_beta3.safetensors`](https://civitai.com/models/30240?modelVersionId=78775)
+                - [`lyriel_v16.safetensors`](https://civitai.com/models/22922/lyriel)
+                - [`rcnzCartoon3d_v10.safetensors`](https://civitai.com/models/66347?modelVersionId=71009)
+                - [`majicmixRealistic_v5Preview.safetensors`](https://civitai.com/models/43331?modelVersionId=79068)
+                - [`realisticVisionV20_v20.safetensors`](https://civitai.com/models/4201?modelVersionId=29460)
+            4. Click `Generate`, wait for ~1 min, and enjoy.
+            """
+        )
+        with gr.Row():
+            with gr.Column():
+                base_model_dropdown = gr.Dropdown( label="Base DreamBooth Model", choices=controller.base_model_list, value=controller.base_model_list[0], interactive=True )
+                motion_module_dropdown = gr.Dropdown( label="Motion Module", choices=controller.motion_module_list, value=controller.motion_module_list[0], interactive=True )
+
+                base_model_dropdown.change(fn=controller.update_base_model, inputs=[base_model_dropdown], outputs=[base_model_dropdown])
                 motion_module_dropdown.change(fn=controller.update_motion_module, inputs=[motion_module_dropdown], outputs=[motion_module_dropdown])
 
-                motion_module_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
-                def update_motion_module():
-                    controller.refresh_motion_module()
-                    return gr.Dropdown.update(choices=controller.motion_module_list)
-                motion_module_refresh_button.click(fn=update_motion_module, inputs=[], outputs=[motion_module_dropdown])
-
-                base_model_dropdown = gr.Dropdown(
-                    label="Select base Dreambooth model (required)",
-                    choices=controller.personalized_model_list,
-                    interactive=True,
-                )
-                base_model_dropdown.change(fn=controller.update_base_model, inputs=[base_model_dropdown], outputs=[base_model_dropdown])
-
-                lora_model_dropdown = gr.Dropdown(
-                    label="Select LoRA model (optional)",
-                    choices=["none"] + controller.personalized_model_list,
-                    value="none",
-                    interactive=True,
-                )
-                lora_model_dropdown.change(fn=controller.update_lora_model, inputs=[lora_model_dropdown], outputs=[lora_model_dropdown])
-
-                lora_alpha_slider = gr.Slider(label="LoRA alpha", value=0.8, minimum=0, maximum=2, interactive=True)
-
-                personalized_refresh_button = gr.Button(value="\U0001F503", elem_classes="toolbutton")
-                def update_personalized_model():
-                    controller.refresh_personalized_model()
-                    return [
-                        gr.Dropdown.update(choices=controller.personalized_model_list),
-                        gr.Dropdown.update(choices=["none"] + controller.personalized_model_list)
-                    ]
-                personalized_refresh_button.click(fn=update_personalized_model, inputs=[], outputs=[base_model_dropdown, lora_model_dropdown])
-
-        with gr.Column(variant="panel"):
-            gr.Markdown(
-                """
-                ### 2. Configs for AnimateDiff.
-                """
-            )
-
-            prompt_textbox = gr.Textbox(label="Prompt", lines=2)
-            negative_prompt_textbox = gr.Textbox(label="Negative prompt", lines=2)
-
-            with gr.Row().style(equal_height=False):
-                with gr.Column():
-                    with gr.Row():
-                        sampler_dropdown = gr.Dropdown(label="Sampling method", choices=list(scheduler_dict.keys()), value=list(scheduler_dict.keys())[0])
-                        sample_step_slider = gr.Slider(label="Sampling steps", value=25, minimum=10, maximum=100, step=1)
-
-                    width_slider = gr.Slider(label="Width", value=512, minimum=256, maximum=1024, step=64)
-                    height_slider = gr.Slider(label="Height", value=512, minimum=256, maximum=1024, step=64)
-                    length_slider = gr.Slider(label="Animation length", value=16, minimum=8, maximum=24, step=1)
-                    cfg_scale_slider = gr.Slider(label="CFG Scale", value=7.5, minimum=0, maximum=20)
-
-                    with gr.Row():
-                        seed_textbox = gr.Textbox(label="Seed", value=-1)
-                        seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
-                        seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e8)), inputs=[], outputs=[seed_textbox])
-
-                    generate_button = gr.Button(value="Generate", variant='primary')
-
-                result_video = gr.Video(label="Generated Animation", interactive=False)
-
-        generate_button.click(
-            fn=controller.animate,
-            inputs=[
-                stable_diffusion_dropdown,
-                motion_module_dropdown,
-                base_model_dropdown,
-                lora_alpha_slider,
-                prompt_textbox,
-                negative_prompt_textbox,
-                sampler_dropdown,
-                sample_step_slider,
-                width_slider,
-                length_slider,
-                height_slider,
-                cfg_scale_slider,
-                seed_textbox,
-            ],
-            outputs=[result_video]
-        )
+                prompt_textbox = gr.Textbox( label="Prompt", lines=3 )
+                negative_prompt_textbox = gr.Textbox( label="Negative Prompt", lines=3, value="worst quality, low quality, nsfw, logo")
+
+                with gr.Accordion("Advance", open=False):
+                    with gr.Row():
+                        width_slider = gr.Slider( label="Width", value=512, minimum=256, maximum=1024, step=64 )
+                        height_slider = gr.Slider( label="Height", value=512, minimum=256, maximum=1024, step=64 )
+                    with gr.Row():
+                        seed_textbox = gr.Textbox( label="Seed", value=-1)
+                        seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
+                        seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e16)), inputs=[], outputs=[seed_textbox])
+
+                generate_button = gr.Button( value="Generate", variant='primary' )
+
+            with gr.Column():
+                result_video = gr.Video( label="Generated Animation", interactive=False )
+                json_config = gr.Json( label="Config", value=None )
+
+        inputs = [base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox]
+        outputs = [result_video, json_config]
+
+        generate_button.click( fn=controller.animate, inputs=inputs, outputs=outputs )
+
+        gr.Examples( fn=controller.animate, examples=examples, inputs=inputs, outputs=outputs, cache_examples=True )
 
     return demo
 
 
 if __name__ == "__main__":
     demo = ui()
-    demo.launch(share=True)
+    demo.queue(max_size=20)
+    demo.launch()
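A note on the reworked seed handling above: the updated `animate` derives one integer seed (from the textbox if positive, otherwise at random), seeds the global torch RNG, and also hands the pipeline an explicit `torch.Generator`, so a logged seed alone is enough to reproduce a sample. Below is a minimal standalone sketch of that flow, illustrative only and not part of the commit; it substitutes a CPU generator for the app's `device="cuda"` and uses `int(1e16)` because `random.randint` expects integer bounds.

import random
import torch

def resolve_seed(seed_textbox: str) -> int:
    # Same rule as the diff: a positive textbox value wins,
    # otherwise draw a fresh random seed.
    if int(seed_textbox) > 0:
        seed = int(seed_textbox)
    else:
        seed = random.randint(1, int(1e16))
    torch.manual_seed(seed)              # seeds the global RNG
    assert seed == torch.initial_seed()  # sanity check, as in the diff
    return seed

seed = resolve_seed("-1")
# A dedicated per-call generator keeps the sampling noise reproducible
# even if other code touches the global RNG (the app uses device="cuda").
generator = torch.Generator(device="cpu").manual_seed(seed)
print(f"### seed: {seed}")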
requirements.txt (new file, 12 lines)

@@ -0,0 +1,12 @@
+torch==1.13.1
+torchvision==0.14.1
+torchaudio==0.13.1
+diffusers==0.11.1
+transformers==4.25.1
+xformers==0.0.16
+imageio==2.27.0
+gdown
+einops
+omegaconf
+safetensors
+gradio
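The new requirements.txt pins exact versions for the packages whose APIs the demo depends on (torch 1.13.1, diffusers 0.11.1, transformers 4.25.1, xformers 0.0.16) and leaves the utility packages unpinned. A small illustrative check, not part of the commit, that an installed environment matches those pins using only the standard library:

from importlib.metadata import PackageNotFoundError, version

# Pinned entries copied from requirements.txt; unpinned ones are omitted.
PINS = {
    "torch": "1.13.1",
    "torchvision": "0.14.1",
    "torchaudio": "0.13.1",
    "diffusers": "0.11.1",
    "transformers": "4.25.1",
    "xformers": "0.0.16",
    "imageio": "2.27.0",
}

for pkg, want in PINS.items():
    try:
        got = version(pkg)
    except PackageNotFoundError:
        print(f"{pkg}: not installed (want {want})")
        continue
    print(f"{pkg}: {got}" + ("" if got == want else f" (want {want})"))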