Changed librosa version to 0.9.1. Also fixed the CUDA issue (forgot that I wasn't passing a self parameter into the get_optimal_torch_device method).

This commit is contained in:
Mangio621
2023-05-04 12:16:15 +10:00
parent bdab05320d
commit dd780d11c2
3 changed files with 3 additions and 3 deletions

View File

@@ -8,7 +8,7 @@ install: ## Install dependencies
apt-get -y install build-essential python3-dev ffmpeg
pip install --upgrade setuptools wheel
pip install --upgrade pip
pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2
pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1
pip install -r requirements.txt
pip install --upgrade lxml
apt-get update

View File

@@ -1,7 +1,7 @@
numba==0.56.4
numpy==1.23.5
scipy==1.9.3
librosa==0.9.2
librosa==0.9.1
llvmlite==0.39.0
fairseq==0.12.2
faiss-cpu==1.7.0; sys_platform == "darwin"

View File

@@ -33,7 +33,7 @@ class VC(object):
def get_optimal_torch_device(index: int = 0) -> torch.device:
# Get cuda device
if torch.cuda.is_available():
return torch.device("cuda:" + str(index)) # Very fast
return torch.device(f"cuda:{index % torch.cuda.device_count()}") # Very fast
elif torch.backends.mps.is_available():
return torch.device("mps")
# Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library