mirror of
https://github.com/modelscope/modelscope.git
synced 2026-02-24 12:10:09 +01:00
add ut
This commit is contained in:
@@ -56,11 +56,12 @@ class LlamafileCMD(CLICommand):
|
||||
)
|
||||
|
||||
group.add_argument(
|
||||
'--execute',
|
||||
'--launch',
|
||||
type=str,
|
||||
required=False,
|
||||
default='True',
|
||||
help='Whether to execute the downloaded llamafile, default to True'
|
||||
help=
|
||||
'Whether to launch model with the downloaded llamafile, default to True.'
|
||||
)
|
||||
|
||||
group.add_argument(
|
||||
@@ -122,12 +123,12 @@ class LlamafileCMD(CLICommand):
|
||||
if sys.platform.startswith('win'):
|
||||
downloaded_file = self._rename_extension(downloaded_file)
|
||||
|
||||
if self.args.execute.lower() == 'true':
|
||||
if self.args.launch.lower() == 'true':
|
||||
print('Launching model with llamafile:')
|
||||
self._execute_llamafile(downloaded_file)
|
||||
else:
|
||||
print(
|
||||
f'Llamafile model downloaded to [{downloaded_file}], you may execute it separately.'
|
||||
f'No Launching. Llamafile model downloaded to [{downloaded_file}], you may execute it separately.'
|
||||
)
|
||||
|
||||
def _execute_llamafile(self, file_path):
|
||||
|
||||
75
tests/cli/test_llamfafile_cmd.py
Normal file
75
tests/cli/test_llamfafile_cmd.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import subprocess
|
||||
import unittest
|
||||
|
||||
|
||||
class LlamafileCMDTest(unittest.TestCase):
    """End-to-end tests for the `modelscope llamafile` CLI subcommand.

    Each test invokes ``python -m modelscope.cli.cli llamafile ...`` in a
    subprocess against mock repos and asserts on the exit status and the
    messages printed to the console.
    """

    def setUp(self):
        # Repo containing several valid *.llamafile assets.
        self.model_id = 'llamafile-club/mock-llamafile-repo'
        # Repo with no usable llamafile, used to exercise the failure path.
        self.invalid_model_id = 'llamafile-club/mock-no-valid-llamafile-repo'
        self.cmd = 'llamafile'

    def _run(self, extra_args):
        """Run the llamafile CLI with *extra_args*; return (exit_status, output)."""
        return subprocess.getstatusoutput(
            f'python -m modelscope.cli.cli {self.cmd} {extra_args}')

    def test_basic(self):
        stat, output = self._run(f'--model {self.model_id}')
        self.assertEqual(stat, 0)
        # Default accuracy is 'q4_k_m', so the Q4_K_M file must be selected.
        self.assertIn(
            'llamafile matching criteria found: [My-Model-14B-Q4_K_M.llamafile]',
            output)
        self.assertIn('Launching model with llamafile', output)

    def test_given_accuracy(self):
        accuracy = 'q8_0'
        stat, output = self._run(
            f'--model {self.model_id} --accuracy {accuracy}')
        self.assertEqual(stat, 0)
        self.assertIn(
            'llamafile matching criteria found: [My-Model-14B-q8_0.llamafile]',
            output)
        self.assertIn('Launching model with llamafile', output)

    def test_given_file(self):
        file = 'My-Model-14B-FP16.llamafile'
        stat, output = self._run(f'--model {self.model_id} --file {file}')
        self.assertEqual(stat, 0)
        self.assertIn(
            'llamafile matching criteria found: [My-Model-14B-FP16.llamafile]',
            output)
        self.assertIn('Launching model with llamafile', output)

    def test_given_both_accuracy_and_file(self):
        accuracy = 'q8_0'
        file = 'My-Model-14B-FP16.llamafile'
        stat, output = self._run(
            f'--model {self.model_id} --file {file} --accuracy {accuracy}')
        # Cannot provide --accuracy and --file at the same time.
        self.assertNotEqual(stat, 0)

    def test_no_match_llamafile(self):
        accuracy = 'not-exist'
        stat, output = self._run(
            f'--model {self.model_id} --accuracy {accuracy}')
        self.assertEqual(stat, 0)
        # With no match the CLI falls back to the first llamafile in the repo.
        self.assertIn(
            'No matched llamafile found in repo, choosing the first llamafile in repo',
            output)
        self.assertIn('Launching model with llamafile', output)

    def test_invalid_repo(self):
        stat, output = self._run(f'--model {self.invalid_model_id}')
        print(output)
        self.assertNotEqual(stat, 0)
        self.assertIn('Cannot locate a valid llamafile in repo', output)

    def test_no_execution(self):
        # --launch False downloads the file but must not execute it.
        stat, output = self._run(f'--model {self.model_id} --launch False')
        self.assertEqual(stat, 0)
        self.assertIn(
            'llamafile matching criteria found: [My-Model-14B-Q4_K_M.llamafile]',
            output)
        self.assertIn('No Launching. Llamafile model downloaded to', output)
|
||||
Reference in New Issue
Block a user