From e32667e045bb0f648ae8a11133fb2fbab83ded23 Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Mon, 25 Nov 2024 14:54:38 +0800 Subject: [PATCH] add gpu flag when gpu is detected --- modelscope/cli/llamafile.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/modelscope/cli/llamafile.py b/modelscope/cli/llamafile.py index 528be904..56bdcb9d 100644 --- a/modelscope/cli/llamafile.py +++ b/modelscope/cli/llamafile.py @@ -135,7 +135,18 @@ class LlamafileCMD(CLICommand): current_mode = os.stat(file_path).st_mode new_mode = current_mode | 0o111 os.chmod(file_path, new_mode) - os.system(file_path) + execute_cmd = file_path + has_gpu = False + try: + import torch + has_gpu = torch.cuda.is_available() + except ModuleNotFoundError: + # we depend on torch to detect gpu. + # if torch is not available, we will just assume gpu cannot be used + pass + if has_gpu: + execute_cmd = f'{execute_cmd} -ngl 999' + os.system(execute_cmd) def _rename_extension(self, original_file_name): directory, filename = os.path.split(original_file_name)