diff --git a/modelscope/models/nlp/codegeex/codegeex_for_code_translation.py b/modelscope/models/nlp/codegeex/codegeex_for_code_translation.py
index be3e79f0..fece907d 100755
--- a/modelscope/models/nlp/codegeex/codegeex_for_code_translation.py
+++ b/modelscope/models/nlp/codegeex/codegeex_for_code_translation.py
@@ -98,7 +98,9 @@ class CodeGeeXForCodeTranslation(TorchModel):
                     generated_code = tokenizer.decode_code(
                         generated_tokens_[n_token_prompt:])
                     generated_code = ''.join(generated_code)
-                    logger.info('================================= Generated code:')
+                    logger.info(
+                        '================================= Generated code:'
+                    )
                     logger.info(generated_code)
                 if all(is_finished):
                     break
diff --git a/modelscope/models/nlp/codegeex/inference.py b/modelscope/models/nlp/codegeex/inference.py
index d058f023..38f14d6c 100755
--- a/modelscope/models/nlp/codegeex/inference.py
+++ b/modelscope/models/nlp/codegeex/inference.py
@@ -1,8 +1,9 @@
 # Copyright (c) 2022 Zhipu.AI
+from typing import List
+
 import torch
 import torch.nn.functional as F
-from typing import List


 def get_ltor_masks_and_position_ids(
@@ -124,7 +125,7 @@ def pad_batch(batch, pad_id, seq_length):
         tokens.extend([pad_id] * (seq_length - context_length))
        context_lengths.append(context_length)
     return batch, context_lengths
-
+

 def get_token_stream(
     model,
diff --git a/modelscope/models/nlp/codegeex/tokenizer.py b/modelscope/models/nlp/codegeex/tokenizer.py
index cc507eb6..a5da9a3c 100755
--- a/modelscope/models/nlp/codegeex/tokenizer.py
+++ b/modelscope/models/nlp/codegeex/tokenizer.py
@@ -1,8 +1,9 @@
 # Copyright (c) 2022 Zhipu.AI
+from typing import List, Union
+
 import torch
 from transformers import AutoTokenizer
 from transformers.models.gpt2 import GPT2TokenizerFast
-from typing import List, Union


 def encode_whitespaces(text, start_extra_id: int, max_len: int):
diff --git a/modelscope/pipelines/nlp/codegeex_code_translation_pipeline.py b/modelscope/pipelines/nlp/codegeex_code_translation_pipeline.py
index f2bce381..ef0f29e0 100755
--- a/modelscope/pipelines/nlp/codegeex_code_translation_pipeline.py
+++ b/modelscope/pipelines/nlp/codegeex_code_translation_pipeline.py
@@ -28,9 +28,9 @@ class CodeGeeXCodeTranslationPipeline(Pipeline):
             self.model.cuda()
         super().__init__(model=model, **kwargs)
-
+
     def preprocess(self, inputs, **preprocess_params) -> Dict[str, Any]:
-        return inputs
+        return inputs

     # define the forward pass
     def forward(self, inputs: Union[Dict], **forward_params) -> Dict[str, Any]:
diff --git a/tests/pipelines/test_CodeGeeX_code_translation.py b/tests/pipelines/test_CodeGeeX_code_translation.py
index a56ae00e..0972c494 100644
--- a/tests/pipelines/test_CodeGeeX_code_translation.py
+++ b/tests/pipelines/test_CodeGeeX_code_translation.py
@@ -17,10 +17,7 @@ class CodeGeeXCodeTranslationTest(unittest.TestCase, DemoCompatibilityCheck):
     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_run_with_CodeGeeX_with_name(self):
         model = 'ZhipuAI/CodeGeeX-Code-Translation-13B'
-        pipe = pipeline(
-            task=Tasks.code_translation,
-            model=model
-        )
+        pipe = pipeline(task=Tasks.code_translation, model=model)
         inputs = {
             'prompt': 'for i in range(10):\n\tprint(i)\n',
             'source language': 'Python',