From 5ca12c6cc425f129bc0e383597b73fda7c1ed76d Mon Sep 17 00:00:00 2001
From: Yingda Chen
Date: Mon, 25 Nov 2024 16:31:52 +0800
Subject: [PATCH] Llamafile support gpu flag (#1097)

* add gpu flag when gpu is detected

* fix typo

* fix typo

* add printout prompt

---------

Co-authored-by: Yingda Chen
---
 modelscope/cli/llamafile.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/modelscope/cli/llamafile.py b/modelscope/cli/llamafile.py
index 528be904..39d1346f 100644
--- a/modelscope/cli/llamafile.py
+++ b/modelscope/cli/llamafile.py
@@ -135,7 +135,20 @@ class LlamafileCMD(CLICommand):
         current_mode = os.stat(file_path).st_mode
         new_mode = current_mode | 0o111
         os.chmod(file_path, new_mode)
-        os.system(file_path)
+        execute_cmd = file_path
+        has_gpu = False
+        try:
+            import torch
+            has_gpu = torch.cuda.is_available()
+        except ModuleNotFoundError:
+            # we depend on torch to detect gpu.
+            # if torch is not available, we will just assume gpu cannot be used
+            pass
+        if has_gpu:
+            print(
+                'GPU detected, launching model with llamafile GPU option >>>')
+            execute_cmd = f'{execute_cmd} -ngl 999'
+        os.system(execute_cmd)
 
     def _rename_extension(self, original_file_name):
         directory, filename = os.path.split(original_file_name)
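
For review context, here is a minimal standalone sketch of the pattern this patch
introduces: treat torch as an optional dependency for GPU detection, and append
llamafile's -ngl flag (llama.cpp's --n-gpu-layers; 999 exceeds any real layer
count, so it effectively offloads the whole model) when a CUDA device is found.
The function name and `llamafile_path` parameter are hypothetical placeholders,
not part of the patched module.

    import subprocess


    def launch_llamafile(llamafile_path: str) -> None:
        # GPU detection mirrors the patch: torch is optional, and if it is
        # not installed we conservatively assume no usable GPU.
        has_gpu = False
        try:
            import torch
            has_gpu = torch.cuda.is_available()
        except ModuleNotFoundError:
            pass

        cmd = [llamafile_path]
        if has_gpu:
            print('GPU detected, launching model with llamafile GPU option >>>')
            # -ngl 999: offload all model layers to the GPU.
            cmd += ['-ngl', '999']
        subprocess.run(cmd, check=False)

One design note: the sketch builds an argument list for subprocess.run rather
than concatenating a shell string for os.system as the patch does, which avoids
shell-quoting problems if the downloaded file path contains spaces; the flag
handling is otherwise the same.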