Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git (synced 2025-12-16 11:48:08 +01:00)
chore(format): run black on dev
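This is a mechanical formatting pass: the diff below is what one would presumably get from running Black over the tree (e.g. `black .` from the repository root), with no behavioral change intended. Each hunk rewraps an over-long assignment or a pre-Black-style conditional expression into Black's parenthesized form.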
@@ -400,13 +400,17 @@ class SineGen(torch.nn.Module):
             f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                 idx + 2
             )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  ### the % 1 means the n_har product can no longer be optimized in post-processing
+            rad_values = (
+                f0_buf / self.sampling_rate
+            ) % 1  ### the % 1 means the n_har product can no longer be optimized in post-processing
             rand_ini = torch.rand(
                 f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
             )
             rand_ini[:, 0] = 0
             rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  ##### a % 1 here would mean the cumsum below can no longer be optimized
+            tmp_over_one = torch.cumsum(
+                rad_values, 1
+            )  # % 1  ##### a % 1 here would mean the cumsum below can no longer be optimized
             tmp_over_one *= upp
             tmp_over_one = F.interpolate(
                 tmp_over_one.transpose(2, 1),
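For context, the code Black is rewrapping here builds a bank of harmonics from f0 and accumulates per-frame phase with a cumsum; the same rewrap appears again in the next hunk, presumably in a second copy of SineGen elsewhere in the tree. A minimal runnable sketch of that math (shapes and harmonic_num are illustrative assumptions; the real SineGen also scales by upp and interpolates the accumulated phase to audio rate with F.interpolate before taking the sine):

import torch

sampling_rate = 16000
harmonic_num = 3
f0 = torch.full((1, 100, 1), 220.0)  # (batch, frames, 1) fundamental, in Hz

f0_buf = torch.zeros(1, 100, harmonic_num + 1)
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in range(harmonic_num):
    # idx + 2: the (idx+1)-th overtone, i.e. the (idx+2)-th harmonic
    f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)

# Per-frame phase increment in cycles; % 1 keeps it in [0, 1)
rad_values = (f0_buf / sampling_rate) % 1

# Random initial phase per harmonic, except for the fundamental
rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2])
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini

# Accumulated phase in cycles; sin(2*pi*phase) yields the harmonic bank
phase = torch.cumsum(rad_values, 1)
sines = torch.sin(2 * torch.pi * phase)
print(sines.shape)  # torch.Size([1, 100, 4])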
@@ -333,13 +333,17 @@ class SineGen(torch.nn.Module):
             f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                 idx + 2
             )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  ### the % 1 means the n_har product can no longer be optimized in post-processing
+            rad_values = (
+                f0_buf / self.sampling_rate
+            ) % 1  ### the % 1 means the n_har product can no longer be optimized in post-processing
             rand_ini = torch.rand(
                 f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
             )
             rand_ini[:, 0] = 0
             rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  ##### a % 1 here would mean the cumsum below can no longer be optimized
+            tmp_over_one = torch.cumsum(
+                rad_values, 1
+            )  # % 1  ##### a % 1 here would mean the cumsum below can no longer be optimized
             tmp_over_one *= upp
             tmp_over_one = F.interpolate(
                 tmp_over_one.transpose(2, 1),
@@ -62,12 +62,12 @@ def torch_bmm(input, mat2, *, out=None):
                 ):  # pylint: disable=invalid-name
                     start_idx_2 = i2 * split_2_slice_size
                     end_idx_2 = (i2 + 1) * split_2_slice_size
-                    hidden_states[
-                        start_idx:end_idx, start_idx_2:end_idx_2
-                    ] = original_torch_bmm(
-                        input[start_idx:end_idx, start_idx_2:end_idx_2],
-                        mat2[start_idx:end_idx, start_idx_2:end_idx_2],
-                        out=out,
+                    hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = (
+                        original_torch_bmm(
+                            input[start_idx:end_idx, start_idx_2:end_idx_2],
+                            mat2[start_idx:end_idx, start_idx_2:end_idx_2],
+                            out=out,
+                        )
                     )
             else:
                 hidden_states[start_idx:end_idx] = original_torch_bmm(
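The pattern being rewrapped here computes bmm in batch chunks and writes each result into a preallocated buffer, which is equivalent to one large bmm. A self-contained sketch (the chunk size is illustrative; the real torch_bmm derives it from per-allocation memory limits):

import torch

def chunked_bmm(a, b, split_slice_size=2):
    # Same result as torch.bmm(a, b), computed slice-by-slice over the batch
    out = torch.empty(a.shape[0], a.shape[1], b.shape[2], dtype=a.dtype)
    for i in range(a.shape[0] // split_slice_size):
        start_idx = i * split_slice_size
        end_idx = (i + 1) * split_slice_size
        out[start_idx:end_idx] = torch.bmm(a[start_idx:end_idx], b[start_idx:end_idx])
    return out

a = torch.randn(4, 3, 5)
b = torch.randn(4, 5, 2)
assert torch.allclose(chunked_bmm(a, b), torch.bmm(a, b), atol=1e-6)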
@@ -138,61 +138,67 @@ def scaled_dot_product_attention(
                     start_idx_2 = i2 * split_2_slice_size
                     end_idx_2 = (i2 + 1) * split_2_slice_size
                     if no_shape_one:
-                        hidden_states[
-                            start_idx:end_idx, start_idx_2:end_idx_2
-                        ] = original_scaled_dot_product_attention(
-                            query[start_idx:end_idx, start_idx_2:end_idx_2],
-                            key[start_idx:end_idx, start_idx_2:end_idx_2],
-                            value[start_idx:end_idx, start_idx_2:end_idx_2],
-                            attn_mask=attn_mask[
-                                start_idx:end_idx, start_idx_2:end_idx_2
-                            ]
-                            if attn_mask is not None
-                            else attn_mask,
-                            dropout_p=dropout_p,
-                            is_causal=is_causal,
+                        hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = (
+                            original_scaled_dot_product_attention(
+                                query[start_idx:end_idx, start_idx_2:end_idx_2],
+                                key[start_idx:end_idx, start_idx_2:end_idx_2],
+                                value[start_idx:end_idx, start_idx_2:end_idx_2],
+                                attn_mask=(
+                                    attn_mask[start_idx:end_idx, start_idx_2:end_idx_2]
+                                    if attn_mask is not None
+                                    else attn_mask
+                                ),
+                                dropout_p=dropout_p,
+                                is_causal=is_causal,
+                            )
                         )
                     else:
-                        hidden_states[
-                            :, start_idx:end_idx, start_idx_2:end_idx_2
-                        ] = original_scaled_dot_product_attention(
-                            query[:, start_idx:end_idx, start_idx_2:end_idx_2],
-                            key[:, start_idx:end_idx, start_idx_2:end_idx_2],
-                            value[:, start_idx:end_idx, start_idx_2:end_idx_2],
-                            attn_mask=attn_mask[
-                                :, start_idx:end_idx, start_idx_2:end_idx_2
-                            ]
-                            if attn_mask is not None
-                            else attn_mask,
-                            dropout_p=dropout_p,
-                            is_causal=is_causal,
+                        hidden_states[:, start_idx:end_idx, start_idx_2:end_idx_2] = (
+                            original_scaled_dot_product_attention(
+                                query[:, start_idx:end_idx, start_idx_2:end_idx_2],
+                                key[:, start_idx:end_idx, start_idx_2:end_idx_2],
+                                value[:, start_idx:end_idx, start_idx_2:end_idx_2],
+                                attn_mask=(
+                                    attn_mask[
+                                        :, start_idx:end_idx, start_idx_2:end_idx_2
+                                    ]
+                                    if attn_mask is not None
+                                    else attn_mask
+                                ),
+                                dropout_p=dropout_p,
+                                is_causal=is_causal,
+                            )
                         )
             else:
                 if no_shape_one:
-                    hidden_states[
-                        start_idx:end_idx
-                    ] = original_scaled_dot_product_attention(
-                        query[start_idx:end_idx],
-                        key[start_idx:end_idx],
-                        value[start_idx:end_idx],
-                        attn_mask=attn_mask[start_idx:end_idx]
-                        if attn_mask is not None
-                        else attn_mask,
-                        dropout_p=dropout_p,
-                        is_causal=is_causal,
+                    hidden_states[start_idx:end_idx] = (
+                        original_scaled_dot_product_attention(
+                            query[start_idx:end_idx],
+                            key[start_idx:end_idx],
+                            value[start_idx:end_idx],
+                            attn_mask=(
+                                attn_mask[start_idx:end_idx]
+                                if attn_mask is not None
+                                else attn_mask
+                            ),
+                            dropout_p=dropout_p,
+                            is_causal=is_causal,
+                        )
                     )
                 else:
-                    hidden_states[
-                        :, start_idx:end_idx
-                    ] = original_scaled_dot_product_attention(
-                        query[:, start_idx:end_idx],
-                        key[:, start_idx:end_idx],
-                        value[:, start_idx:end_idx],
-                        attn_mask=attn_mask[:, start_idx:end_idx]
-                        if attn_mask is not None
-                        else attn_mask,
-                        dropout_p=dropout_p,
-                        is_causal=is_causal,
+                    hidden_states[:, start_idx:end_idx] = (
+                        original_scaled_dot_product_attention(
+                            query[:, start_idx:end_idx],
+                            key[:, start_idx:end_idx],
+                            value[:, start_idx:end_idx],
+                            attn_mask=(
+                                attn_mask[:, start_idx:end_idx]
+                                if attn_mask is not None
+                                else attn_mask
+                            ),
+                            dropout_p=dropout_p,
+                            is_causal=is_causal,
+                        )
                     )
     else:
         return original_scaled_dot_product_attention(
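This hunk applies the same chunking idea to attention: slice query/key/value (and attn_mask when present) along a batch-like dimension and stitch the outputs back together. A minimal sketch with PyTorch's public F.scaled_dot_product_attention (the chunk size and tensor shapes are illustrative; without dropout the chunked result should match the unchunked one up to float rounding):

import torch
import torch.nn.functional as F

q = torch.randn(4, 8, 16, 32)  # (batch, heads, tokens, head_dim)
k = torch.randn(4, 8, 16, 32)
v = torch.randn(4, 8, 16, 32)

split = 2
out = torch.empty_like(q)
for i in range(q.shape[0] // split):
    s, e = i * split, (i + 1) * split
    out[s:e] = F.scaled_dot_product_attention(q[s:e], k[s:e], v[s:e])

print(torch.allclose(out, F.scaled_dot_product_attention(q, k, v), atol=1e-5))  # True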
@@ -104,11 +104,11 @@ def return_xpu(device):
     return (
         f"xpu:{device[-1]}"
         if isinstance(device, str) and ":" in device
-        else f"xpu:{device}"
-        if isinstance(device, int)
-        else torch.device("xpu")
-        if isinstance(device, torch.device)
-        else "xpu"
+        else (
+            f"xpu:{device}"
+            if isinstance(device, int)
+            else torch.device("xpu") if isinstance(device, torch.device) else "xpu"
+        )
     )
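Both versions parse to the same chained conditional expression; Black only makes the nesting explicit. A standalone copy (torch import assumed) behaves like this:

import torch

def return_xpu(device):
    # Map any CUDA-style device spec onto the equivalent XPU spec
    return (
        f"xpu:{device[-1]}"
        if isinstance(device, str) and ":" in device
        else (
            f"xpu:{device}"
            if isinstance(device, int)
            else torch.device("xpu") if isinstance(device, torch.device) else "xpu"
        )
    )

print(return_xpu("cuda:1"))              # xpu:1
print(return_xpu(0))                     # xpu:0
print(return_xpu(torch.device("cpu")))   # xpu (as a torch.device)
print(return_xpu("cuda"))                # xpu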
@@ -271,12 +271,16 @@ def ipex_hijacks():
         "torch.batch_norm",
         lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(
             input,
-            weight
-            if weight is not None
-            else torch.ones(input.size()[1], device=input.device),
-            bias
-            if bias is not None
-            else torch.zeros(input.size()[1], device=input.device),
+            (
+                weight
+                if weight is not None
+                else torch.ones(input.size()[1], device=input.device)
+            ),
+            (
+                bias
+                if bias is not None
+                else torch.zeros(input.size()[1], device=input.device)
+            ),
             *args,
             **kwargs,
         ),
@@ -286,12 +290,16 @@ def ipex_hijacks():
         "torch.instance_norm",
         lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(
             input,
-            weight
-            if weight is not None
-            else torch.ones(input.size()[1], device=input.device),
-            bias
-            if bias is not None
-            else torch.zeros(input.size()[1], device=input.device),
+            (
+                weight
+                if weight is not None
+                else torch.ones(input.size()[1], device=input.device)
+            ),
+            (
+                bias
+                if bias is not None
+                else torch.zeros(input.size()[1], device=input.device)
+            ),
             *args,
             **kwargs,
         ),
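These two hijacks (batch_norm and instance_norm) share one guarantee: if weight or bias is None, substitute an identity weight (ones) and a zero bias so the wrapped op never sees None. A sketch with the low-level torch.batch_norm binding the hijack wraps (momentum/eps values and the training flag are illustrative):

import torch

input = torch.randn(4, 3, 8)  # (batch, channels, length)
weight, bias = None, None

out = torch.batch_norm(
    input,
    weight if weight is not None else torch.ones(input.size()[1], device=input.device),
    bias if bias is not None else torch.zeros(input.size()[1], device=input.device),
    None,   # running_mean (unused in training mode here)
    None,   # running_var
    True,   # training: compute batch statistics
    0.1,    # momentum
    1e-5,   # eps
    False,  # cudnn_enabled
)
print(out.shape)  # torch.Size([4, 3, 8])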
@@ -116,9 +116,11 @@ else:
     feats = readwave(wav_path, normalize=saved_cfg.task.normalize)
     padding_mask = torch.BoolTensor(feats.shape).fill_(False)
     inputs = {
-        "source": feats.half().to(device)
-        if is_half and device not in ["mps", "cpu"]
-        else feats.to(device),
+        "source": (
+            feats.half().to(device)
+            if is_half and device not in ["mps", "cpu"]
+            else feats.to(device)
+        ),
         "padding_mask": padding_mask.to(device),
         "output_layer": 9 if version == "v1" else 12,  # layer 9
     }
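The "source" entry picks fp16 only when is_half is set and the device supports it; mps and cpu stay fp32. A standalone sketch of that guard (feats, device, and the flags are stand-ins for the extractor's real inputs):

import torch

device = "cpu"
is_half = True
version = "v2"
feats = torch.randn(1, 16000)
padding_mask = torch.BoolTensor(feats.shape).fill_(False)

inputs = {
    "source": (
        feats.half().to(device)
        if is_half and device not in ["mps", "cpu"]
        else feats.to(device)
    ),
    "padding_mask": padding_mask.to(device),
    "output_layer": 9 if version == "v1" else 12,
}
print(inputs["source"].dtype)  # torch.float32: half is skipped on cpu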
@@ -38,26 +38,28 @@ class VC:

         to_return_protect0 = {
             "visible": self.if_f0 != 0,
-            "value": to_return_protect[0]
-            if self.if_f0 != 0 and to_return_protect
-            else 0.5,
+            "value": (
+                to_return_protect[0] if self.if_f0 != 0 and to_return_protect else 0.5
+            ),
             "__type__": "update",
         }
         to_return_protect1 = {
             "visible": self.if_f0 != 0,
-            "value": to_return_protect[1]
-            if self.if_f0 != 0 and to_return_protect
-            else 0.33,
+            "value": (
+                to_return_protect[1] if self.if_f0 != 0 and to_return_protect else 0.33
+            ),
             "__type__": "update",
         }

         if sid == "" or sid == []:
-            if self.hubert_model is not None:  # given the polling, we need a check for whether sid switched from a loaded model to no model
+            if (
+                self.hubert_model is not None
+            ):  # given the polling, we need a check for whether sid switched from a loaded model to no model
                 logger.info("Clean model cache")
                 del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr)  # ,cpt
-                self.hubert_model = (
-                    self.net_g
-                ) = self.n_spk = self.hubert_model = self.tgt_sr = None
+                self.hubert_model = self.net_g = self.n_spk = self.hubert_model = (
+                    self.tgt_sr
+                ) = None
                 if torch.cuda.is_available():
                     torch.cuda.empty_cache()
                 ### without this hassle the cleanup below isn't thorough
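The rewrapped chained assignment is still a single statement: every target on the left is bound to None, and the parenthesized (self.tgt_sr) is a valid assignment target (self.hubert_model appearing twice comes from the original code and is redundant but harmless). Illustrated on a bare object:

class _Slot:
    pass

vc = _Slot()
# Same shape Black produces: each left-hand target gets None
vc.hubert_model = vc.net_g = vc.n_spk = vc.hubert_model = (
    vc.tgt_sr
) = None
assert vc.hubert_model is vc.net_g is vc.n_spk is vc.tgt_sr is None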