diff --git a/modelscope/cli/llamafile.py b/modelscope/cli/llamafile.py
index fab7a193..39d1346f 100644
--- a/modelscope/cli/llamafile.py
+++ b/modelscope/cli/llamafile.py
@@ -145,6 +145,8 @@ class LlamafileCMD(CLICommand):
             # if torch is not available, we will just assume gpu cannot be used
             pass
         if has_gpu:
+            print(
+                'GPU detected, launching model with llamafile GPU option >>>')
             execute_cmd = f'{execute_cmd} -ngl 999'
 
         os.system(execute_cmd)