From 331b71092bab3722845185faa985ad7eb082356e Mon Sep 17 00:00:00 2001 From: "wenmeng.zwm" Date: Fri, 10 Nov 2023 18:40:37 +0800 Subject: [PATCH 1/3] update readme --- examples/apps/llm_riddles/README.md | 1 + examples/apps/llm_riddles/README_CN.md | 1 + examples/apps/llm_riddles/app.py | 19 ++++-- examples/apps/llm_riddles/llm.py | 71 ++++++++++++++++++++++ examples/apps/llm_riddles/requirements.txt | 1 + 5 files changed, 89 insertions(+), 4 deletions(-) diff --git a/examples/apps/llm_riddles/README.md b/examples/apps/llm_riddles/README.md index 1d85ff6e..056b575e 100644 --- a/examples/apps/llm_riddles/README.md +++ b/examples/apps/llm_riddles/README.md @@ -4,6 +4,7 @@ "Oh No! I'm Surrounded by LLMs!" is an intellectual challenge game. We use LLM to automatically generate corresponding game code based on existing Large Language Model (LLM) dialogue Gradio application codes within the ModelScope community, combined with preset questions from the Zhihu article ["How to Accomplish Tasks with 'Impossible'"](https://zhuanlan.zhihu.com/p/665393240), creating a unique gameplay experience. In this stream, players are required to cleverly construct questions that challenge the LLM to provide answers that meet specific conditions. ## News +November 9, 2023 - Added two new questions, and introduced the chatglm-turbo model 🔥🔥🔥 November 7, 2023 - Released the initial demo version 🔥 November 8, 2023 - Segregated level modules and LLM, enabling independent integration of levels and LLM. 
Pull Requests welcome 🔥 🔥 diff --git a/examples/apps/llm_riddles/README_CN.md b/examples/apps/llm_riddles/README_CN.md index 21450c34..0f85734c 100644 --- a/examples/apps/llm_riddles/README_CN.md +++ b/examples/apps/llm_riddles/README_CN.md @@ -5,6 +5,7 @@ ## 更新 +2023.11.9 新增两道题目, 新增chatglm-turbo模型🔥 🔥🔥 2023.11.7 发布初版demo🔥 2023.11.8 拆分关卡模块和llm,支持关卡独立接入,llm独立接入, 欢迎PR 🔥 🔥 diff --git a/examples/apps/llm_riddles/app.py b/examples/apps/llm_riddles/app.py index b0e63ebf..ab855eeb 100644 --- a/examples/apps/llm_riddles/app.py +++ b/examples/apps/llm_riddles/app.py @@ -112,9 +112,15 @@ def generate_response(input, model_name): return '' -def on_submit(input, state): - model_name = os.environ.get('MODEL', 'qwen-plus') - gen_fn = functools.partial(generate_response, model_name=model_name) +def on_submit(input, model_name, state): + # model_name = os.environ.get('MODEL', 'qwen-plus') + name_map = { + '通义千问max': 'qwen-max', + '通义千问plus': 'qwen-plus', + 'chatglm-turbo': 'chatglm_turbo', + } + gen_fn = functools.partial( + generate_response, model_name=name_map[model_name]) response = gen_fn(input) history = [(input, response)] print(history) @@ -167,6 +173,11 @@ with block as demo: 你将通过本游戏对大型语言模型产生更深刻的理解。 在本游戏中,你需要构造一个提给一个大型语言模型的问题,使得它回复的答案符合要求。""") + + model_selector = gr.Dropdown( + label='选择模型', + choices=['通义千问max', '通义千问plus', 'chatglm-turbo'], + value='qwen-plus') question_info = gr.Markdown( update_question_info(current_chapter_index, current_challenge_index)) challenge_info = gr.Textbox( @@ -187,7 +198,7 @@ with block as demo: submit.click( on_submit, - inputs=[message, state], + inputs=[message, model_selector, state], outputs=[challenge_result, chatbot, question_info, challenge_info]) shareBtn.click(generate_share_image, inputs=[state], outputs=[shareImg]) diff --git a/examples/apps/llm_riddles/llm.py b/examples/apps/llm_riddles/llm.py index 8965ee8f..1214b319 100644 --- a/examples/apps/llm_riddles/llm.py +++ b/examples/apps/llm_riddles/llm.py @@ -80,6 +80,70 @@ class 
DashScope: return '' +class ZhiPu: + + def __init__(self, model_name: str = 'chatglm_turbo'): + """Initializes the ZhiPu instance with a given model name. + + The constructor sets up the model name that will be used for response generation + and initializes the ZhiPu API key from environment variables. + + Args: + model_name (str): The name of the model to be used. Defaults to 'chatglm_turbo'. + """ + import zhipuai # Import zhipuai module at runtime + zhipuai.api_key = os.getenv( + 'ZHIPU_API_KEY') # Set the API key from environment variable + self.model: str = model_name # Assign the model name to an instance variable + + def __call__(self, input: Union[str, List[Dict[str, str]]], + **kwargs: Any) -> Union[str, None]: + """Allows the ZhiPu instance to be called as a function. + + { + "code":200, + "msg":"操作成功", + "data":{ + "request_id":"8098024428488935671", + "task_id":"8098024428488935671", + "task_status":"SUCCESS", + "choices":[ + { + "role":"assistant", + "content":"\" 您好!作为人工智能助手,我很乐意为您提供帮助。请问您有什么问题或者需要解决的事情吗?您可以向我提问,我会尽力为您解答。\"" + } + ], + "usage":{ + "prompt_tokens":2, + "completion_tokens":32, + "total_tokens":34 + } + }, + "success":true + } + """ + import zhipuai + if isinstance(input, str): + messages: List[Dict[str, str]] = [{ + 'role': 'user', + 'content': input + }] + else: + messages = input + + response = zhipuai.model_api.invoke( + model=self.model, + prompt=messages, + top_p=0.7, + temperature=0.9, + ) + if response['code'] == 200: + return response['data']['choices'][0]['content'] + else: + print(f'{self.model} error: ', response) + return '' + + def create_model(model_name: str): """Factory function to create a DashScope model instance based on the model name. 
@@ -94,5 +158,12 @@ def create_model(model_name: str): """ if model_name.startswith('qwen'): return DashScope(model_name) + elif model_name.startswith('chatglm'): + return ZhiPu(model_name) else: raise ValueError('Other model implementations need to be provided.') + + +if __name__ == '__main__': + model = create_model('chatglm_turbo') + print(model('输入')) diff --git a/examples/apps/llm_riddles/requirements.txt b/examples/apps/llm_riddles/requirements.txt index 004cc675..0093d792 100644 --- a/examples/apps/llm_riddles/requirements.txt +++ b/examples/apps/llm_riddles/requirements.txt @@ -2,3 +2,4 @@ dashscope gradio==3.39.0 pillow sympy +zhipuai From 9271fa613466a780441f503f8fc7d33ebe55579c Mon Sep 17 00:00:00 2001 From: "wenmeng.zwm" Date: Fri, 10 Nov 2023 18:59:17 +0800 Subject: [PATCH 2/3] format code --- examples/apps/llm_riddles/challenges/ch1.py | 2 +- examples/apps/llm_riddles/challenges/ch3.py | 5 ++++- examples/apps/llm_riddles/challenges/ch4.py | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/apps/llm_riddles/challenges/ch1.py b/examples/apps/llm_riddles/challenges/ch1.py index f1507d13..7f02d32f 100644 --- a/examples/apps/llm_riddles/challenges/ch1.py +++ b/examples/apps/llm_riddles/challenges/ch1.py @@ -9,7 +9,7 @@ def validate_palindrome_invariance(origin_input, response, generate_response): # 检查原始输入不是回文 if origin_input == origin_input[::-1]: return False - + # 将原始问题倒序并提交 reversed_question = origin_input[::-1] reversed_response = generate_response(reversed_question) diff --git a/examples/apps/llm_riddles/challenges/ch3.py b/examples/apps/llm_riddles/challenges/ch3.py index 1fe706da..72b1dac3 100644 --- a/examples/apps/llm_riddles/challenges/ch3.py +++ b/examples/apps/llm_riddles/challenges/ch3.py @@ -1,5 +1,6 @@ import re + def validate_palindrome_inverse(origin_input, response, generate_response): """ 验证器函数,检查对于给定的问题,正着问和倒着问的模型的回答本身不回文且也是逆序的关系。 @@ -71,7 +72,9 @@ challenge3 = { 'description': 
'请输入一个以“为什么”开头,以“?”结尾的不包含数字的问题,使回答中只包含数字。', 'validator': - lambda response, input: '为什么' == input[:3] and "?" == input[-1] and len(re.findall(r'\d+', input)) == 0 and response.isdigit() + lambda response, input: '为什么' == input[:3] and '?' == input[-1] + and # noqa: W504 + len(re.findall(r'\d+', input)) == 0 and response.isdigit() }, ] } diff --git a/examples/apps/llm_riddles/challenges/ch4.py b/examples/apps/llm_riddles/challenges/ch4.py index 0ec4f1da..ba2cce88 100644 --- a/examples/apps/llm_riddles/challenges/ch4.py +++ b/examples/apps/llm_riddles/challenges/ch4.py @@ -1,5 +1,6 @@ import re + def validate_reciprocal_question(input, response, generate_response): """ 验证器函数,检查给定的问题A和回答B,是否能够通过以B作为新的提问得到原始问题A作为回答。 From 0bd622f4906234078c66233f0047c1f2e23f4a6c Mon Sep 17 00:00:00 2001 From: "wenmeng.zwm" Date: Fri, 10 Nov 2023 20:04:33 +0800 Subject: [PATCH 3/3] update model name --- examples/apps/llm_riddles/app.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/apps/llm_riddles/app.py b/examples/apps/llm_riddles/app.py index ab855eeb..e013ad7a 100644 --- a/examples/apps/llm_riddles/app.py +++ b/examples/apps/llm_riddles/app.py @@ -115,8 +115,8 @@ def generate_response(input, model_name): def on_submit(input, model_name, state): # model_name = os.environ.get('MODEL', 'qwen-plus') name_map = { - '通义千问max': 'qwen-max', - '通义千问plus': 'qwen-plus', + 'qwen-max': 'qwen-max', + 'qwen-plus': 'qwen-plus', 'chatglm-turbo': 'chatglm_turbo', } gen_fn = functools.partial( @@ -176,8 +176,8 @@ with block as demo: model_selector = gr.Dropdown( label='选择模型', - choices=['通义千问max', '通义千问plus', 'chatglm-turbo'], - value='qwen-plus') + choices=['qwen-max', 'qwen-plus', 'chatglm-turbo'], + value='qwen-max') question_info = gr.Markdown( update_question_info(current_chapter_index, current_challenge_index)) challenge_info = gr.Textbox(