From 7b44c5348622f56689f8788931d1bb42e14077d4 Mon Sep 17 00:00:00 2001
From: Yingda Chen
Date: Mon, 25 Nov 2024 16:29:24 +0800
Subject: [PATCH] add printout prompt

---
 modelscope/cli/llamafile.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modelscope/cli/llamafile.py b/modelscope/cli/llamafile.py
index fab7a193..39d1346f 100644
--- a/modelscope/cli/llamafile.py
+++ b/modelscope/cli/llamafile.py
@@ -145,6 +145,8 @@ class LlamafileCMD(CLICommand):
             # if torch is not available, we will just assume gpu cannot be used
             pass
         if has_gpu:
+            print(
+                'GPU detected, launching model with llamafile GPU option >>>')
             execute_cmd = f'{execute_cmd} -ngl 999'
         os.system(execute_cmd)
 