From c8bba782dae18b56b96d392d78da726e4c40d873 Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Tue, 19 Nov 2024 22:09:05 +0800 Subject: [PATCH] add ut --- modelscope/cli/llamafile.py | 9 ++-- tests/cli/test_llamafile_cmd.py | 75 ++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 4 deletions(-) create mode 100644 tests/cli/test_llamafile_cmd.py diff --git a/modelscope/cli/llamafile.py b/modelscope/cli/llamafile.py index 7ae708fa..528be904 100644 --- a/modelscope/cli/llamafile.py +++ b/modelscope/cli/llamafile.py @@ -56,11 +56,12 @@ class LlamafileCMD(CLICommand): ) group.add_argument( - '--execute', + '--launch', type=str, required=False, default='True', - help='Whether to execute the downloaded llamafile, default to True' + help= + 'Whether to launch model with the downloaded llamafile, default to True.' ) group.add_argument( @@ -122,12 +123,12 @@ class LlamafileCMD(CLICommand): if sys.platform.startswith('win'): downloaded_file = self._rename_extension(downloaded_file) - if self.args.execute.lower() == 'true': + if self.args.launch.lower() == 'true': print('Launching model with llamafile:') self._execute_llamafile(downloaded_file) else: print( - f'Llamafile model downloaded to [{downloaded_file}], you may execute it separately.' + f'No Launching. Llamafile model downloaded to [{downloaded_file}], you may execute it separately.' 
) def _execute_llamafile(self, file_path): diff --git a/tests/cli/test_llamafile_cmd.py b/tests/cli/test_llamafile_cmd.py new file mode 100644 index 00000000..616ed78c --- /dev/null +++ b/tests/cli/test_llamafile_cmd.py @@ -0,0 +1,75 @@ +import subprocess +import unittest + + +class LlamafileCMDTest(unittest.TestCase): + + def setUp(self): + self.model_id = 'llamafile-club/mock-llamafile-repo' + self.invalid_model_id = 'llamafile-club/mock-no-valid-llamafile-repo' + self.cmd = 'llamafile' + + def test_basic(self): + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.model_id}' + stat, output = subprocess.getstatusoutput(cmd) + self.assertEqual(stat, 0) + # default accuracy is 'q4_k_m' + self.assertTrue( + 'llamafile matching criteria found: [My-Model-14B-Q4_K_M.llamafile]' + in output) + self.assertTrue('Launching model with llamafile' in output) + + def test_given_accuracy(self): + accuracy = 'q8_0' + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.model_id} --accuracy {accuracy}' + stat, output = subprocess.getstatusoutput(cmd) + self.assertEqual(stat, 0) + self.assertTrue( + 'llamafile matching criteria found: [My-Model-14B-q8_0.llamafile]' + in output) + self.assertTrue('Launching model with llamafile' in output) + + def test_given_file(self): + file = 'My-Model-14B-FP16.llamafile' + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.model_id} --file {file}' + stat, output = subprocess.getstatusoutput(cmd) + self.assertEqual(stat, 0) + self.assertTrue( + 'llamafile matching criteria found: [My-Model-14B-FP16.llamafile]' + in output) + self.assertTrue('Launching model with llamafile' in output) + + def test_given_both_accuracy_and_file(self): + accuracy = 'q8_0' + file = 'My-Model-14B-FP16.llamafile' + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.model_id} --file {file} --accuracy {accuracy}' + stat, output = subprocess.getstatusoutput(cmd) + # cannot provide accuracy and file at the same time + 
self.assertNotEqual(stat, 0) + + def test_no_match_llamafile(self): + accuracy = 'not-exist' + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.model_id} --accuracy {accuracy}' + stat, output = subprocess.getstatusoutput(cmd) + self.assertEqual(stat, 0) + self.assertTrue( + 'No matched llamafile found in repo, choosing the first llamafile in repo' + in output) + self.assertTrue('Launching model with llamafile' in output) + + def test_invalid_repo(self): + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.invalid_model_id}' + stat, output = subprocess.getstatusoutput(cmd) + print(output) + self.assertNotEqual(stat, 0) + self.assertTrue('Cannot locate a valid llamafile in repo' in output) + + def test_no_execution(self): + cmd = f'python -m modelscope.cli.cli {self.cmd} --model {self.model_id} --launch False' + stat, output = subprocess.getstatusoutput(cmd) + self.assertEqual(stat, 0) + self.assertTrue( + 'llamafile matching criteria found: [My-Model-14B-Q4_K_M.llamafile]' + in output) + self.assertTrue( + 'No Launching. Llamafile model downloaded to' in output)