From f5adc992a22875f9d4810bc01540c614a0bf2c17 Mon Sep 17 00:00:00 2001 From: Yingda Chen Date: Thu, 21 Nov 2024 19:07:36 +0800 Subject: [PATCH] fix message --- modelscope/pipelines/nlp/llm_pipeline.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modelscope/pipelines/nlp/llm_pipeline.py b/modelscope/pipelines/nlp/llm_pipeline.py index c4f3cc10..416a7d73 100644 --- a/modelscope/pipelines/nlp/llm_pipeline.py +++ b/modelscope/pipelines/nlp/llm_pipeline.py @@ -370,6 +370,7 @@ class LLMPipeline(Pipeline, PipelineStreamingOutputMixin): def preprocess(self, inputs: Union[str, Dict], **kwargs): is_messages = kwargs.pop('is_messages') + print(kwargs) if is_messages: tokens = self.format_messages(inputs, self.tokenizer, **kwargs) else: @@ -440,6 +441,7 @@ class LLMPipeline(Pipeline, PipelineStreamingOutputMixin): # for compatibility, also support input list, but we shall wrap it into Dict if isinstance(messages, list): messages = {'messages': messages} + kwargs['is_messages'] = True for role, content in LLMPipeline._message_iter(messages): tokens = LLMPipeline._concat_with_special_tokens( tokens, role, content, tokenizer)