diff --git a/modelscope/models/base/base_torch_model.py b/modelscope/models/base/base_torch_model.py
index 2caeb41b..27960b46 100644
--- a/modelscope/models/base/base_torch_model.py
+++ b/modelscope/models/base/base_torch_model.py
@@ -30,10 +30,17 @@ class TorchModel(Model, torch.nn.Module):
     def __call__(self, *args, **kwargs) -> Dict[str, Any]:
         # Adapting a model with only one dict arg, and the arg name must be input or inputs
+        # NOTE(review): trace inputs/outputs through the module logger instead of
+        # print(); lazy %-args avoid stringifying large tensors unless DEBUG is on.
+        logger.debug('TorchModel.__call__ args: %s, kwargs: %s', args, kwargs)
         if func_receive_dict_inputs(self.forward):
-            return self.postprocess(self.forward(args[0], **kwargs))
+            res = self.forward(args[0], **kwargs)
+            logger.debug('forward (dict-input) result: %s', res)
+            return self.postprocess(res)
         else:
-            return self.postprocess(self.forward(*args, **kwargs))
+            res = self.forward(*args, **kwargs)
+            logger.debug('forward result: %s', res)
+            return self.postprocess(res)
 
     def _load_pretrained(
         self,
         net,
diff --git a/modelscope/models/nlp/chatglm2/text_generation.py b/modelscope/models/nlp/chatglm2/text_generation.py
index c9d63724..fa5997b0 100644
--- a/modelscope/models/nlp/chatglm2/text_generation.py
+++ b/modelscope/models/nlp/chatglm2/text_generation.py
@@ -1205,7 +1205,10 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel):
             **kwargs
         }
         inputs = self.build_inputs(tokenizer, query, history=history)
+        # NOTE(review): build_inputs returns a BatchEncoding, which has no .shape;
+        # log the input_ids tensor's shape instead.
+        logger.debug('_chat input_ids shape: %s', inputs['input_ids'].shape)
         outputs = self.generate(**inputs, **gen_kwargs)
+        logger.debug('_chat outputs shape: %s', outputs.shape)
         outputs = outputs.tolist()[0][len(inputs['input_ids'][0]):]
         response = tokenizer.decode(outputs)
         response = self.process_response(response)
diff --git a/modelscope/pipelines/nlp/llm_pipeline.py b/modelscope/pipelines/nlp/llm_pipeline.py
index d509fcc0..b6b157e1 100644
--- a/modelscope/pipelines/nlp/llm_pipeline.py
+++ b/modelscope/pipelines/nlp/llm_pipeline.py
@@ -121,6 +121,8 @@ class LLMPipeline(Pipeline):
 
         if hasattr(self.model, 'generate'):
+            # NOTE(review): record which generate() implementation handles the call.
+            logger.debug('dispatching to self.model.generate: %r', self.model.generate)
             outputs = self.model.generate(**tokens, **forward_params)
         elif hasattr(self.model, 'model') and hasattr(self.model.model,
                                                       'generate'):
             outputs = self.model.model.generate(**tokens, **forward_params)