diff --git a/modelscope/preprocessors/templates/loader.py b/modelscope/preprocessors/templates/loader.py
index 2e5d2625..2e8540e9 100644
--- a/modelscope/preprocessors/templates/loader.py
+++ b/modelscope/preprocessors/templates/loader.py
@@ -53,6 +53,12 @@ def no_multi_modal():
 template_info = [
     # llama
     ## "llama3"
+    TemplateInfo(
+        template_regex=
+        f'.*{cases("llama3.3", "llama-3.3")}.*',
+        modelfile_prefix=
+        'https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/llama3.3',
+    ),
     TemplateInfo(
         template_regex=
         f'.*{cases("llama3.2", "llama-3.2")}.*{cases("vision")}.*',
diff --git a/tests/tools/test_to_ollama.py b/tests/tools/test_to_ollama.py
index d174c65f..ce190f5c 100644
--- a/tests/tools/test_to_ollama.py
+++ b/tests/tools/test_to_ollama.py
@@ -300,6 +300,8 @@ class TestToOllama(unittest.TestCase):
             'paraphrase-multilingual')
         _test_check_tmpl_type('bartowski/Marco-o1-GGUF', 'marco-o1')
         _test_check_tmpl_type('Qwen/QwQ-32B-Preview', 'qwq')
+        _test_check_tmpl_type('LLM-Research/Llama-3.3-70B-Instruct',
+                              'llama3.3')
 
 
 if __name__ == '__main__':