Mirror of https://github.com/guoyww/AnimateDiff.git
update
@@ -92,20 +92,17 @@ class TemporalTransformer3DModel(nn.Module):
         attention_head_dim,
 
         num_layers,
-        attention_block_types=(
-            "Temporal_Self",
-            "Temporal_Self",
-        ),
-        dropout=0.0,
-        norm_num_groups=32,
-        cross_attention_dim=768,
-        activation_fn="geglu",
-        attention_bias=False,
-        upcast_attention=False,
+        attention_block_types = ( "Temporal_Self", "Temporal_Self", ),
+        dropout = 0.0,
+        norm_num_groups = 32,
+        cross_attention_dim = 768,
+        activation_fn = "geglu",
+        attention_bias = False,
+        upcast_attention = False,
 
-        cross_frame_attention_mode=None,
-        temporal_position_encoding=False,
-        temporal_position_encoding_max_len=24,
+        cross_frame_attention_mode = None,
+        temporal_position_encoding = False,
+        temporal_position_encoding_max_len = 24,
     ):
         super().__init__()
 
@@ -228,10 +225,14 @@ class TemporalTransformerBlock(nn.Module):
 
 
 class PositionalEncoding(nn.Module):
-    def __init__(self, d_model: int, dropout: float = 0., max_len: int = 24):
+    def __init__(
+        self,
+        d_model,
+        dropout = 0.,
+        max_len = 24
+    ):
         super().__init__()
         self.dropout = nn.Dropout(p=dropout)
-        # print(f"d_model: {d_model}")
         position = torch.arange(max_len).unsqueeze(1)
         div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
         pe = torch.zeros(1, max_len, d_model)
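The changed lines stop at `pe = torch.zeros(1, max_len, d_model)`, so the rest of PositionalEncoding is not visible in this hunk. A minimal self-contained sketch of the whole module, assuming the standard batch-first sinusoidal-encoding recipe for the sin/cos fill, the buffer registration, and the forward pass (everything past the `torch.zeros` line is an assumption, not a quote of the file):

import math
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    # Signature matches the post-commit hunk above; the body past
    # `pe = torch.zeros(...)` is reconstructed, not shown in the diff.
    def __init__(
        self,
        d_model,
        dropout = 0.,
        max_len = 24
    ):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)   # (max_len, 1) frame indices
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)           # batch-first lookup table
        pe[0, :, 0::2] = torch.sin(position * div_term) # even channels
        pe[0, :, 1::2] = torch.cos(position * div_term) # odd channels
        self.register_buffer('pe', pe)                  # moves with .to()/.cuda(), never trained

    def forward(self, x):
        # x: (batch, n_frames, d_model), with n_frames <= max_len
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

With the default max_len = 24, a 16-frame clip reshaped to (batch * height * width, n_frames, channels) passes straight through: PositionalEncoding(320)(torch.randn(2, 16, 320)) keeps its shape while adding a distinct offset per frame index.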
@@ -247,10 +248,10 @@ class PositionalEncoding(nn.Module):
 class VersatileAttention(CrossAttention):
     def __init__(
         self,
-        attention_mode=None,
-        cross_frame_attention_mode=None,
-        temporal_position_encoding=False,
-        temporal_position_encoding_max_len=24,
+        attention_mode = None,
+        cross_frame_attention_mode = None,
+        temporal_position_encoding = False,
+        temporal_position_encoding_max_len = 24,
         *args, **kwargs
     ):
         super().__init__(*args, **kwargs)
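This hunk only reformats the signature, so the constructor body stays out of view. A sketch of one plausible wiring, assumed rather than quoted from the commit: temporal_position_encoding and temporal_position_encoding_max_len feed the PositionalEncoding module from the previous hunk, and a small stub stands in for the CrossAttention class that the real file imports from diffusers, so the example runs on its own.

import torch.nn as nn

class CrossAttention(nn.Module):
    # Stand-in for the diffusers class; only here to make the sketch
    # self-contained. The real base class does the actual attention.
    def __init__(self, query_dim, **kwargs):
        super().__init__()
        self.query_dim = query_dim

class VersatileAttention(CrossAttention):
    def __init__(
        self,
        attention_mode = None,
        cross_frame_attention_mode = None,
        temporal_position_encoding = False,
        temporal_position_encoding_max_len = 24,
        *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.attention_mode = attention_mode
        self.cross_frame_attention_mode = cross_frame_attention_mode
        # Hypothetical wiring, not visible in this diff: attach the
        # sinusoidal table (PositionalEncoding from the sketch above)
        # only when the block runs temporal attention.
        self.pos_encoder = PositionalEncoding(
            kwargs["query_dim"],
            dropout = 0.,
            max_len = temporal_position_encoding_max_len,
        ) if (temporal_position_encoding and attention_mode == "Temporal") else None

In this sketch, VersatileAttention(attention_mode = "Temporal", temporal_position_encoding = True, query_dim = 320) gets a pos_encoder, while spatial blocks leave it as None.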