diff --git a/modelscope/preprocessors/templates/loader.py b/modelscope/preprocessors/templates/loader.py
index e3ed9f89..e286802b 100644
--- a/modelscope/preprocessors/templates/loader.py
+++ b/modelscope/preprocessors/templates/loader.py
@@ -83,7 +83,7 @@ template_info = [
     TemplateInfo(
         template=TemplateType.chatml,
         template_regex=
-        f'.*{cases("yi")}{no_multi_modal()}{no("coder")}.*{chat_suffix}.*',
+        f'.*{cases("yi")}{no_multi_modal()}{no("coder")}.*',
         modelfile_link=
         'https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/yi-1.5.modelfile',
     ),
@@ -110,6 +110,10 @@ template_info = [
         'https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/glm4.modelfile',
     ),
+    TemplateInfo(
+        template_regex=f'.*{cases("llava-llama-3")}.*',
+        modelfile_link='https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/llava-llama-3.modelfile'),
+
     # baichuan
     TemplateInfo(
         template=TemplateType.baichuan,
diff --git a/tests/tools/test_to_ollama.py b/tests/tools/test_to_ollama.py
index aaf5f4d0..ba92c1ea 100644
--- a/tests/tools/test_to_ollama.py
+++ b/tests/tools/test_to_ollama.py
@@ -100,6 +100,11 @@ class TestToOllama(unittest.TestCase):
         ollama = TemplateLoader.to_ollama(
             'QuantFactory/Mistral-Nemo-Japanese-Instruct-2408-GGUF')
         self.assertTrue(ollama is not None)
+        ollama = TemplateLoader.to_ollama('AI-ModelScope/Yi-1.5-9B-32K-GGUF')
+        self.assertTrue(ollama is not None)
+        ollama = TemplateLoader.to_ollama(
+            'AI-ModelScope/llava-llama-3-8b-v1_1-gguf')
+        self.assertTrue(ollama is not None)


 if __name__ == '__main__':