mirror of
https://github.com/modelscope/modelscope.git
synced 2025-12-24 12:09:22 +01:00
Llamafile support gpu flag (#1097)
* add gpu flag when gpu is detected * fix typo * fix typo * add printout prompt --------- Co-authored-by: Yingda Chen <yingda.chen@alibaba-inc.com>
This commit is contained in:
@@ -135,7 +135,20 @@ class LlamafileCMD(CLICommand):
         current_mode = os.stat(file_path).st_mode
         new_mode = current_mode | 0o111
         os.chmod(file_path, new_mode)
-        os.system(file_path)
+        execute_cmd = file_path
+        has_gpu = False
+        try:
+            import torch
+            has_gpu = torch.cuda.is_available()
+        except ModuleNotFoundError:
+            # we depend on torch to detect gpu.
+            # if torch is not available, we will just assume gpu cannot be used
+            pass
+        if has_gpu:
+            print(
+                'GPU detected, launching model with llamafile GPU option >>>')
+            execute_cmd = f'{execute_cmd} -ngl 999'
+        os.system(execute_cmd)
 
     def _rename_extension(self, original_file_name):
         directory, filename = os.path.split(original_file_name)
Reference in New Issue
Block a user