commit 41a698ae8e
parent 81f2422dc3
Author: Yuwei Guo
Date:   2023-07-11 18:12:56 +08:00

@@ -92,10 +92,7 @@ class TemporalTransformer3DModel(nn.Module):
         attention_head_dim,
         num_layers,
-        attention_block_types=(
-            "Temporal_Self",
-            "Temporal_Self",
-        ),
+        attention_block_types = ( "Temporal_Self", "Temporal_Self", ),
         dropout = 0.0,
         norm_num_groups = 32,
         cross_attention_dim = 768,
@@ -228,10 +225,14 @@ class TemporalTransformerBlock(nn.Module):
 class PositionalEncoding(nn.Module):
-    def __init__(self, d_model: int, dropout: float = 0., max_len: int = 24):
+    def __init__(
+        self,
+        d_model,
+        dropout = 0.,
+        max_len = 24
+    ):
         super().__init__()
         self.dropout = nn.Dropout(p=dropout)
-        # print(f"d_model: {d_model}")
         position = torch.arange(max_len).unsqueeze(1)
         div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
         pe = torch.zeros(1, max_len, d_model)
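
For context, the second hunk ends partway through PositionalEncoding.__init__. Below is a minimal, self-contained sketch of how the class plausibly continues, assuming the standard sinusoidal scheme (sine on even channels, cosine on odd channels, the table stored via register_buffer, and a forward that adds the encoding along the temporal axis). The forward signature and the buffer name pe are assumptions; the commit itself only shows the lines above.

import math
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding over the temporal axis (sketch)."""
    def __init__(
        self,
        d_model,
        dropout = 0.,
        max_len = 24
    ):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Positions 0..max_len-1 and the per-channel frequency terms.
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        # Assumed continuation: even channels get sine, odd channels get cosine.
        pe[0, :, 0::2] = torch.sin(position * div_term)
        pe[0, :, 1::2] = torch.cos(position * div_term)
        # Stored as a buffer so it moves with the module but is not a parameter.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch, seq_len, d_model); add the encoding for the first seq_len positions.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

With max_len = 24 and no dropout by default, this matches the hunk's intent of encoding frame indices for the temporal attention blocks; only the lines up to the pe buffer allocation are confirmed by the diff.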