mirror of
https://github.com/Mangio621/Mangio-RVC-Fork.git
synced 2026-02-24 03:49:51 +01:00
Changed librosa version to 0.9.1. Also fixed the CUDA issue (I forgot that I wasn't passing a self parameter into the get_optimal_device method).
This commit is contained in:
2
Makefile
2
Makefile
@@ -8,7 +8,7 @@ install: ## Install dependencies
|
||||
apt-get -y install build-essential python3-dev ffmpeg
|
||||
pip install --upgrade setuptools wheel
|
||||
pip install --upgrade pip
|
||||
pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2
|
||||
pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1
|
||||
pip install -r requirements.txt
|
||||
pip install --upgrade lxml
|
||||
apt-get update
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
numba==0.56.4
|
||||
numpy==1.23.5
|
||||
scipy==1.9.3
|
||||
librosa==0.9.2
|
||||
librosa==0.9.1
|
||||
llvmlite==0.39.0
|
||||
fairseq==0.12.2
|
||||
faiss-cpu==1.7.0; sys_platform == "darwin"
|
||||
|
||||
@@ -33,7 +33,7 @@ class VC(object):
|
||||
def get_optimal_torch_device(index: int = 0) -> torch.device:
|
||||
# Get cuda device
|
||||
if torch.cuda.is_available():
|
||||
return torch.device("cuda:" + str(index)) # Very fast
|
||||
return torch.device(f"cuda:{index % torch.cuda.device_count()}") # Very fast
|
||||
elif torch.backends.mps.is_available():
|
||||
return torch.device("mps")
|
||||
# Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library
|
||||
|
||||
Reference in New Issue
Block a user