diff --git a/README.md b/README.md
index 468e0555..dd6d3350 100644
--- a/README.md
+++ b/README.md
@@ -202,7 +202,7 @@ CPU docker image
 registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.6.1
 
 # py38
-registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-1.6.1
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch2.0.1-tf2.13.0-1.9.5
 ```
 
 GPU docker image
@@ -211,7 +211,7 @@ GPU docker image
 registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1
 
 # py38
-registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-1.6.1
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.8.0-py38-torch2.0.1-tf2.13.0-1.9.5
 ```
 
 ## Setup Local Python Environment
@@ -220,7 +220,7 @@ One can also set up local ModelScope environment using pip and conda. ModelScop
 We suggest [anaconda](https://docs.anaconda.com/anaconda/install/) for creating local python environment:
 
 ```shell
-conda create -n modelscope python=3.9
+conda create -n modelscope python=3.8
 conda activate modelscope
 ```
 
diff --git a/README_ja.md b/README_ja.md
index 073b0c48..4523add4 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -208,7 +208,7 @@ CPU docker イメージ
 registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.6.1
 
 # py38
-registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-1.6.1
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch2.0.1-tf2.13.0-1.9.5
 ```
 
 GPU docker イメージ
@@ -217,7 +217,7 @@ GPU docker イメージ
 registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1
 
 # py38
-registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-1.6.1
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.8.0-py38-torch2.0.1-tf2.13.0-1.9.5
 ```
 
 ## ローカル Python 環境のセットアップ
diff --git a/README_zh.md b/README_zh.md
index 77fe684a..10b2e728 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -53,19 +53,19 @@ ModelScope开源了数百个(当前700+)模型,涵盖自然语言处理、计
 自然语言处理:
 
 * [ChatGLM3-6B](https://modelscope.cn/models/ZhipuAI/chatglm3-6b/summary)
-
+
 * [Qwen-14B-Chat](https://modelscope.cn/models/qwen/Qwen-14B-Chat/summary)
-
+
 * [Baichuan2-13B-Chat](https://modelscope.cn/models/baichuan-inc/Baichuan2-13B-Chat/summary)
-
+
 * [Ziya2-13B-Chat](https://modelscope.cn/models/Fengshenbang/Ziya2-13B-Chat/summary)
-
+
 * [Internlm-chat-20b](https://modelscope.cn/models/Shanghai_AI_Laboratory/internlm-chat-20b/summary)
-
+
 * [Udever-bloom-1b1](https://modelscope.cn/models/damo/udever-bloom-1b1/summary)
-
+
 * [CoROM文本向量-中文-电商领域-base](https://modelscope.cn/models/damo/nlp_corom_sentence-embedding_chinese-base-ecom/summary)
-
+
 * [MGeo地址相似度匹配实体对齐-中文-地址领域-base](https://modelscope.cn/models/damo/mgeo_geographic_entity_alignment_chinese_base/summary)
 
 多模态:
@@ -84,7 +84,7 @@
 计算机视觉:
 
 * [DamoFD人脸检测关键点模型-0.5G](https://modelscope.cn/models/damo/cv_ddsar_face-detection_iclr23-damofd/summary)
-
+
 * [BSHM人像抠图](https://modelscope.cn/models/damo/cv_unet_image-matting/summary)
 
 * [DCT-Net人像卡通化-3D](https://modelscope.cn/models/damo/cv_unet_person-image-cartoon-3d_compound-models/summary)
@@ -195,7 +195,7 @@ CPU镜像
 registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.6.1
 
 # py38
-registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-1.6.1
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch2.0.1-tf2.13.0-1.9.5
 ```
 
 GPU镜像
@@ -204,14 +204,14 @@ GPU镜像
 registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1
 
 # py38
-registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-1.6.1
+registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.8.0-py38-torch2.0.1-tf2.13.0-1.9.5
 ```
 
 ## 搭建本地Python环境
 
-你也可以使用pip和conda搭建本地python环境,我们推荐使用[Anaconda](https://docs.anaconda.com/anaconda/install/),安装完成后,执行如下命令为modelscope library创建对应的python环境:
+你也可以使用pip和conda搭建本地python环境,ModelScope支持python3.7+以上环境,我们推荐使用[Anaconda](https://docs.anaconda.com/anaconda/install/),安装完成后,执行如下命令为modelscope library创建对应的python环境:
 
 ```shell
-conda create -n modelscope python=3.7
+conda create -n modelscope python=3.8
 conda activate modelscope
 ```
diff --git a/examples/apps/llm_riddles/app.py b/examples/apps/llm_riddles/app.py
index 164faeee..94432043 100644
--- a/examples/apps/llm_riddles/app.py
+++ b/examples/apps/llm_riddles/app.py
@@ -115,8 +115,8 @@ def generate_response(input, model_name):
 def on_submit(input, model_name, state):
     # model_name = os.environ.get('MODEL', 'qwen-plus')
     name_map = {
-        '通义千问max': 'qwen-max',
-        '通义千问plus': 'qwen-plus',
+        'qwen-max': 'qwen-max',
+        'qwen-plus': 'qwen-plus',
         'chatglm-turbo': 'chatglm_turbo',
     }
     gen_fn = functools.partial(
@@ -156,62 +156,68 @@ def generate_share_image(state):
     return gr.Image.update(visible=True, value=img_pil)
 
 
-# Gradio界面构建
-block = gr.Blocks()
+def create_app():
+    # Gradio界面构建
+    block = gr.Blocks()
 
-with block as demo:
-    current_chapter_index = 0
-    current_challenge_index = 0
-    state = gr.State(
-        dict(
-            current_challenge_index=current_challenge_index,
-            current_chapter_index=current_chapter_index))
+    with block as demo:
+        current_chapter_index = 0
+        current_challenge_index = 0
+        state = gr.State(
+            dict(
+                current_challenge_index=current_challenge_index,
+                current_chapter_index=current_chapter_index))
 
-    gr.Markdown("""<center><font size=8>
-完蛋!我被LLM包围了!</center>
-""")
-    gr.Markdown("""欢迎来玩LLM Riddles复刻版:完蛋!我被LLM包围了!
+        gr.Markdown("""<center><font size=8>
+完蛋!我被LLM包围了!</center>
+""")
+        gr.Markdown("""欢迎来玩LLM Riddles复刻版:完蛋!我被LLM包围了!
 
-你将通过本游戏对大型语言模型产生更深刻的理解。
+    你将通过本游戏对大型语言模型产生更深刻的理解。
 
-在本游戏中,你需要构造一个提给一个大型语言模型的问题,使得它回复的答案符合要求。""")
+    在本游戏中,你需要构造一个提给一个大型语言模型的问题,使得它回复的答案符合要求。""")
 
-    model_selector = gr.Dropdown(
-        label='选择模型',
-        choices=['通义千问max', '通义千问plus', 'chatglm-turbo'],
-        value='通义千问plus')
-    question_info = gr.Markdown(
-        update_question_info(current_chapter_index, current_challenge_index))
-    challenge_info = gr.Textbox(
-        value=update_challenge_info(current_chapter_index,
-                                    current_challenge_index),
-        label='当前挑战',
-        disabled=True)
-    challenge_result = gr.Textbox(label='挑战结果', disabled=True)
-    chatbot = gr.Chatbot(
-        lines=8, label='Qwen-plus', elem_classes='control-height')
-    message = gr.Textbox(lines=2, label='输入')
+        model_selector = gr.Dropdown(
+            label='选择模型',
+            choices=['qwen-max', 'qwen-plus', 'chatglm-turbo'],
+            value='qwen-max')
+        question_info = gr.Markdown(
+            update_question_info(current_chapter_index,
+                                 current_challenge_index))
+        challenge_info = gr.Textbox(
+            value=update_challenge_info(current_chapter_index,
+                                        current_challenge_index),
+            label='当前挑战',
+            interactive=False)
+        challenge_result = gr.Textbox(label='挑战结果', interactive=False)
+        chatbot = gr.Chatbot(label='llm', elem_classes='control-height')
+        message = gr.Textbox(lines=2, label='输入')
 
-    with gr.Row():
-        submit = gr.Button('🚀 发送')
-        shareBtn = gr.Button('💯 分享成绩')
+        with gr.Row():
+            submit = gr.Button('🚀 发送')
+            shareBtn = gr.Button('💯 分享成绩')
 
-    shareImg = gr.Image(label='分享成绩', visible=False, width=400)
+        shareImg = gr.Image(label='分享成绩', visible=False, width=400)
 
-    submit.click(
-        on_submit,
-        inputs=[message, model_selector, state],
-        outputs=[challenge_result, chatbot, question_info, challenge_info])
-    shareBtn.click(generate_share_image, inputs=[state], outputs=[shareImg])
+        submit.click(
+            on_submit,
+            inputs=[message, model_selector, state],
+            outputs=[challenge_result, chatbot, question_info, challenge_info])
+        shareBtn.click(
+            generate_share_image, inputs=[state], outputs=[shareImg])
 
-    gr.HTML("""
-<div class="footer">
-    Powered by
-    DashScope
-</div>
-""")
+        gr.HTML("""
+<div class="footer">
+    Powered by
+    DashScope
+</div>
+    """)
 
-demo.queue(concurrency_count=10).launch(height=800, share=True)
+    demo.queue(concurrency_count=10).launch(height=800, share=True)
+
+
+if __name__ == '__main__':
+    create_app()
diff --git a/examples/apps/llm_riddles/challenges/ch3.py b/examples/apps/llm_riddles/challenges/ch3.py
index d368eba2..bb8040df 100644
--- a/examples/apps/llm_riddles/challenges/ch3.py
+++ b/examples/apps/llm_riddles/challenges/ch3.py
@@ -1,4 +1,5 @@
 import re
+import unicodedata
 
 
 def validate_palindrome_inverse(origin_input, response, generate_response):
@@ -86,5 +87,15 @@ challenge3 = {
             (all(char not in input for char in ['狗', '汪', 'dog']) and
              response.strip() == '汪~')
         },
+        {
+            'title':
+            '第7题 魔搭 (🐶)',
+            'description':
+            '请提一个不包含任何中文字符、换行或空格的问题,使模型正好回答"魔搭"。',
+            'validator':
+            lambda response, input: not any(
+                'CJK' in unicodedata.name(char, '') or char in '\t\n '
+                for char in input) and (response.strip() == '魔搭')
+        },
     ]
 }
diff --git a/examples/apps/llm_riddles/llm.py b/examples/apps/llm_riddles/llm.py
index 1214b319..9fb7b7bd 100644
--- a/examples/apps/llm_riddles/llm.py
+++ b/examples/apps/llm_riddles/llm.py
@@ -136,6 +136,7 @@ class ZhiPu:
             prompt=messages,
             top_p=0.7,
             temperature=0.9,
+            return_type='text',
         )
         if response['code'] == 200:
             return response['data']['choices'][0]['content']
diff --git a/examples/apps/llm_riddles/test_validate_fn.py b/examples/apps/llm_riddles/test_validate_fn.py
new file mode 100644
index 00000000..ba435aa6
--- /dev/null
+++ b/examples/apps/llm_riddles/test_validate_fn.py
@@ -0,0 +1,13 @@
+from app import challenges
+
+
+def test_valid():
+    for challenge in challenges:
+        for p in challenge['problems']:
+            val_fn = p['validator']
+            try:
+                val_fn('response', 'input')
+            except Exception:
+                import traceback
+                traceback.print_exc()
+                print(p, 'failed')