lint and fix
@@ -7,6 +7,7 @@ repos:
         (?x)^(
             thirdparty/|
             examples/|
+            modelscope/preprocessors/templates/|
             modelscope/utils/ast_index_file.py|
             modelscope/fileio/format/jsonplus.py
         )$
@@ -17,6 +18,7 @@ repos:
       exclude: |
         (?x)^(
             examples/|
+            modelscope/preprocessors/templates/|
             modelscope/utils/ast_index_file.py|
             modelscope/fileio/format/jsonplus.py
         )$
@@ -28,6 +30,7 @@ repos:
         (?x)^(
             thirdparty/|
             examples/|
+            modelscope/preprocessors/templates/|
             modelscope/utils/ast_index_file.py|
             modelscope/fileio/format/jsonplus.py
         )$
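The exclude blocks above use Python's (?x) verbose-regex flag, which ignores layout whitespace inside the pattern, so each excluded path can sit on its own line. A minimal sketch of that equivalence with plain re, independent of pre-commit itself:

import re

# The same (?x) verbose pattern as in the config: whitespace and
# newlines inside the pattern are ignored, one alternative per line.
verbose = re.compile(r'''(?x)^(
    thirdparty/|
    examples/|
    modelscope/preprocessors/templates/|
    modelscope/utils/ast_index_file.py|
    modelscope/fileio/format/jsonplus.py
)$''')

compact = re.compile(
    r'^(thirdparty/|examples/|modelscope/preprocessors/templates/|'
    r'modelscope/utils/ast_index_file.py|modelscope/fileio/format/jsonplus.py)$')

# Both compile to the same alternation:
assert verbose.match('modelscope/utils/ast_index_file.py')
assert compact.match('modelscope/utils/ast_index_file.py')
assert not verbose.match('modelscope/models/base.py')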
@@ -60,7 +60,7 @@ template_info = [
     TemplateInfo(
         template=TemplateType.llama,
         template_regex=
-        f'{cases("llama", "llama-2", "mistral", "codestral", "mixtral")}{no_multi_modal()}.*{chat_suffix}.*'
+        f'{cases("llama", "llama-2-", "mistral", "codestral", "mixtral")}{no_multi_modal()}.*{chat_suffix}.*'
     ),

     # qwen
@@ -76,14 +76,17 @@ template_info = [
         template_regex=
         f'.*{cases("codeqwen1.5", "codeqwen-1.5-")}.*{chat_suffix}.*',
         modelfile_link=
-        'https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/phi3.modelfile',
+        'https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/codeqwen1.5.modelfile',
     ),

     # chatml
     TemplateInfo(
         template=TemplateType.chatml,
         template_regex=
-        f'.*{cases("yi")}{no_multi_modal()}{no("coder")}.*{chat_suffix}.*'),
+        f'.*{cases("yi")}{no_multi_modal()}{no("coder")}.*{chat_suffix}.*',
+        modelfile_link=
+        'https://modelscope.oss-cn-beijing.aliyuncs.com/llm_template/ollama/yi-1.5.modelfile',
+    ),

     # chatml
     TemplateInfo(
@@ -111,7 +114,7 @@ template_info = [
     TemplateInfo(
         template=TemplateType.baichuan,
         template_regex=
-        f'.*{cases("baichuan")}.*{no_multi_modal()}.*{chat_suffix}.*'),
+        f'.*{cases("baichuan")}{no_multi_modal()}.*{chat_suffix}.*'),

     # codegeex
     TemplateInfo(
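The template_regex fixes above are easier to read with the helper functions in mind. A minimal sketch with hypothetical implementations of cases, no, no_multi_modal, and chat_suffix (the real ones ship alongside template_info and may differ), showing how the f-strings compose one regex per template:

import re

# Hypothetical stand-ins for the helpers composed in template_regex;
# shown only to illustrate how a match against a model id works.
def cases(*names):
    # Alternation over the known model-name fragments.
    return '(' + '|'.join(re.escape(n) for n in names) + ')'

def no(*names):
    # Negative lookahead: reject ids containing any fragment.
    return ''.join(f'(?!.*{re.escape(n)})' for n in names)

def no_multi_modal():
    # Assumed fragment list; the real list may differ.
    return no('vl', 'vision', 'audio')

chat_suffix = '(chat|instruct|it)'

pattern = f'.*{cases("yi")}{no_multi_modal()}{no("coder")}.*{chat_suffix}.*'
# Assume model ids are lower-cased before matching:
assert re.match(pattern, '01ai/yi-1.5-9b-chat')
assert not re.match(pattern, '01ai/yi-coder-9b-chat')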
@@ -11,64 +11,78 @@ class TestToOllama(unittest.TestCase):

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_load_template(self):
-        template = TemplateLoader.load_by_model_id("LLM-Research/Meta-Llama-3-8B-Instruct")
+        template = TemplateLoader.load_by_model_id(
+            'LLM-Research/Meta-Llama-3-8B-Instruct')
         self.assertTrue(template.template_type == TemplateType.llama3)

-        template = TemplateLoader.load_by_model_id("swift/Meta-Llama-3-70B-Instruct-AWQ")
+        template = TemplateLoader.load_by_model_id(
+            'swift/Meta-Llama-3-70B-Instruct-AWQ')
         self.assertTrue(template.template_type == TemplateType.llama3)

-        template = TemplateLoader.load_by_model_id("deepseek-ai/DeepSeek-V2-Lite-Chat")
+        template = TemplateLoader.load_by_model_id(
+            'deepseek-ai/DeepSeek-V2-Lite-Chat')
         self.assertTrue(template.template_type == TemplateType.deepseek2)

-        template = TemplateLoader.load_by_model_id("deepseek-ai/DeepSeek-V2.5")
+        template = TemplateLoader.load_by_model_id('deepseek-ai/DeepSeek-V2.5')
         self.assertTrue(template.template_type == TemplateType.deepseek2_5)

-        template = TemplateLoader.load_by_model_id("deepseek-ai/deepseek-coder-1.3b-instruct")
+        template = TemplateLoader.load_by_model_id(
+            'deepseek-ai/deepseek-coder-1.3b-instruct')
         self.assertTrue(template.template_type == TemplateType.deepseek_coder)

-        template = TemplateLoader.load_by_model_id("OpenBuddy/openbuddy-deepseek-67b-v15.2")
+        template = TemplateLoader.load_by_model_id(
+            'OpenBuddy/openbuddy-deepseek-67b-v15.2')
         self.assertTrue(template is None)

-        template = TemplateLoader.load_by_model_id("deepseek-ai/deepseek-llm-67b-chat")
+        template = TemplateLoader.load_by_model_id(
+            'deepseek-ai/deepseek-llm-67b-chat')
         self.assertTrue(template.template_type == TemplateType.deepseek)

-        template = TemplateLoader.load_by_model_id("deepseek-ai/DeepSeek-Coder-V2-Instruct")
+        template = TemplateLoader.load_by_model_id(
+            'deepseek-ai/DeepSeek-Coder-V2-Instruct')
         self.assertTrue(template.template_type == TemplateType.deepseek2)

-        template = TemplateLoader.load_by_model_id("01ai/Yi-1.5-9B-Chat")
+        template = TemplateLoader.load_by_model_id('01ai/Yi-1.5-9B-Chat')
         self.assertTrue(template.template_type == TemplateType.chatml)

-        template = TemplateLoader.load_by_model_id("01ai/Yi-Coder-9B-Chat")
+        template = TemplateLoader.load_by_model_id('01ai/Yi-Coder-9B-Chat')
         self.assertTrue(template.template_type == TemplateType.yi_coder)

-        template = TemplateLoader.load_by_model_id("LLM-Research/gemma-2-27b-it")
+        template = TemplateLoader.load_by_model_id(
+            'LLM-Research/gemma-2-27b-it')
         self.assertTrue(template.template_type == TemplateType.gemma)

-        template = TemplateLoader.load_by_model_id("AI-ModelScope/gemma-2b")
+        template = TemplateLoader.load_by_model_id('AI-ModelScope/gemma-2b')
         self.assertTrue(template is None)

-        template = TemplateLoader.load_by_model_id("AI-ModelScope/gemma-2b-instruct")
+        template = TemplateLoader.load_by_model_id(
+            'AI-ModelScope/gemma-2b-instruct')
         self.assertTrue(template is None)

-        template = TemplateLoader.load_by_model_id("AI-ModelScope/gemma2-2b-instruct")
+        template = TemplateLoader.load_by_model_id(
+            'AI-ModelScope/gemma2-2b-instruct')
         self.assertTrue(template.template_type == TemplateType.gemma)

-        template = TemplateLoader.load_by_model_id("AI-ModelScope/paligemma-3b-mix-224")
+        template = TemplateLoader.load_by_model_id(
+            'AI-ModelScope/paligemma-3b-mix-224')
         self.assertTrue(template is None)

-        template = TemplateLoader.load_by_model_id("LLM-Research/Phi-3-vision-128k-instruct")
+        template = TemplateLoader.load_by_model_id(
+            'LLM-Research/Phi-3-vision-128k-instruct')
         self.assertTrue(template is None)

-        template = TemplateLoader.load_by_model_id("LLM-Research/Phi-3-128k-instruct")
+        template = TemplateLoader.load_by_model_id(
+            'LLM-Research/Phi-3-128k-instruct')
         self.assertTrue(template.template_type == TemplateType.phi3)

-        template = TemplateLoader.load_by_model_id("LLM-Research/Phi-3-128k-instruct-GGUF")
+        template = TemplateLoader.load_by_model_id(
+            'LLM-Research/Phi-3-128k-instruct-GGUF')
         self.assertTrue(template.template_type == TemplateType.phi3)

     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_load_ollama(self):
-        ollama = TemplateLoader.to_ollama("AI-ModelScope/gemma2-2b-instruct-GGUF")
+        ollama = TemplateLoader.to_ollama(
+            'AI-ModelScope/gemma2-2b-instruct-GGUF')
         self.assertTrue(ollama is not None)
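For context, the entry points exercised by these tests can be used directly. A minimal usage sketch; the import path is assumed from the modelscope/preprocessors/templates/ directory seen in this diff and may differ:

# Assumed import path, inferred from modelscope/preprocessors/templates/:
from modelscope.preprocessors.templates.loader import TemplateLoader

# Resolve a chat template purely from a ModelScope model id.
template = TemplateLoader.load_by_model_id('01ai/Yi-1.5-9B-Chat')
print(template.template_type)  # chatml, per test_load_template above

# Build the Ollama-facing template object for a GGUF repo.
ollama = TemplateLoader.to_ollama('AI-ModelScope/gemma2-2b-instruct-GGUF')
assert ollama is not None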