Merge pull request #289 from Restry/patch-1
Fix an exception in fid_dialogue_pipeline.py when running inference on CPU
@@ -191,8 +191,8 @@ class FidDialoguePipeline(Pipeline):
     def postprocess(self, inputs: TokenGeneratorOutput,
                     **postprocess_params) -> Dict[str, Any]:
 
-        if torch.cuda.is_available():
-            hypotheses = inputs.sequences.detach().cpu().tolist()
+        # if torch.cuda.is_available():
+        hypotheses = inputs.sequences.detach().cpu().tolist()
 
         response = self.preprocessor_tokenizer.decode(
             hypotheses[0], skip_special_tokens=self.is_t5)
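
A minimal sketch of the failure mode and the fix. In the original code, `hypotheses` was only assigned when CUDA was available, so CPU-only inference hit an unbound-variable error at the decode step. Since `.cpu()` is a no-op for a tensor that already lives on the CPU, the conversion can run unconditionally. The helper names below (postprocess_old, postprocess_fixed, fake_sequences) are illustrative stand-ins, not identifiers from the repository; `sequences` plays the role of `inputs.sequences`.

import torch

def postprocess_old(sequences: torch.Tensor):
    # Original logic: `hypotheses` is only bound on machines with a GPU,
    # so CPU-only inference raises UnboundLocalError on the return line.
    if torch.cuda.is_available():
        hypotheses = sequences.detach().cpu().tolist()
    return hypotheses[0]

def postprocess_fixed(sequences: torch.Tensor):
    # Fixed logic: .detach().cpu() works on both CPU and GPU tensors,
    # so the conversion runs unconditionally and `hypotheses` is always bound.
    hypotheses = sequences.detach().cpu().tolist()
    return hypotheses[0]

if __name__ == "__main__":
    fake_sequences = torch.tensor([[101, 2023, 102]])  # stand-in for generated token ids
    print(postprocess_fixed(fake_sequences))  # works with or without CUDA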