diff --git a/Makefile b/Makefile
index e1a2261..0927cac 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ install: ## Install dependencies
 	apt-get -y install build-essential python3-dev ffmpeg
 	pip install --upgrade setuptools wheel
 	pip install --upgrade pip
-	pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2
+	pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1
 	pip install -r requirements.txt
 	pip install --upgrade lxml
 	apt-get update
diff --git a/requirements.txt b/requirements.txt
index b600fc9..1296ef8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 numba==0.56.4
 numpy==1.23.5
 scipy==1.9.3
-librosa==0.9.2
+librosa==0.9.1
 llvmlite==0.39.0
 fairseq==0.12.2
 faiss-cpu==1.7.0; sys_platform == "darwin"
diff --git a/vc_infer_pipeline.py b/vc_infer_pipeline.py
index eca79af..73323ac 100644
--- a/vc_infer_pipeline.py
+++ b/vc_infer_pipeline.py
@@ -33,7 +33,7 @@ class VC(object):
     def get_optimal_torch_device(index: int = 0) -> torch.device:
         # Get cuda device
         if torch.cuda.is_available():
-            return torch.device("cuda:" + str(index))  # Very fast
+            return torch.device(f"cuda:{index % torch.cuda.device_count()}")  # Very fast
         elif torch.backends.mps.is_available():
             return torch.device("mps")
         # Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library