From b037e9caf0a39b83c7340ddaadeb49e7ab3bb621 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Wed, 21 Feb 2024 14:53:33 +0800
Subject: [PATCH] Fix word orignal (#750)

---
 .../cv/face_reconstruction/models/pix2pix/pix2pix_model.py | 2 +-
 .../models/cv/image_classification/backbones/beit_v2.py    | 2 +-
 modelscope/models/nlp/plug_mental/backbone.py              | 6 +++---
 modelscope/models/nlp/structbert/backbone.py               | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/modelscope/models/cv/face_reconstruction/models/pix2pix/pix2pix_model.py b/modelscope/models/cv/face_reconstruction/models/pix2pix/pix2pix_model.py
index 54768fc1..f1a7c6c7 100644
--- a/modelscope/models/cv/face_reconstruction/models/pix2pix/pix2pix_model.py
+++ b/modelscope/models/cv/face_reconstruction/models/pix2pix/pix2pix_model.py
@@ -13,7 +13,7 @@ class Pix2PixModel(nn.Module):
     The model training requires '--dataset_mode aligned' dataset.
     By default, it uses a '--netG unet256' U-Net generator,
     a '--netD basic' discriminator (PatchGAN),
-    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
+    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
 
     pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
     """
diff --git a/modelscope/models/cv/image_classification/backbones/beit_v2.py b/modelscope/models/cv/image_classification/backbones/beit_v2.py
index eda11727..a567eada 100644
--- a/modelscope/models/cv/image_classification/backbones/beit_v2.py
+++ b/modelscope/models/cv/image_classification/backbones/beit_v2.py
@@ -41,7 +41,7 @@ class Mlp(nn.Module):
         x = self.fc1(x)
         x = self.act(x)
         # x = self.drop(x)
-        # commit this for the orignal BERT implement
+        # commit this for the original BERT implement
         x = self.fc2(x)
         x = self.drop(x)
         return x
diff --git a/modelscope/models/nlp/plug_mental/backbone.py b/modelscope/models/nlp/plug_mental/backbone.py
index e8531f52..918fcdbd 100755
--- a/modelscope/models/nlp/plug_mental/backbone.py
+++ b/modelscope/models/nlp/plug_mental/backbone.py
@@ -1031,7 +1031,7 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
         head_mask = self.get_head_mask(head_mask,
                                        self.config.num_hidden_layers)
 
-        embedding_output, orignal_embeds = self.embeddings(
+        embedding_output, original_embeds = self.embeddings(
             input_ids=input_ids,
             position_ids=position_ids,
             token_type_ids=token_type_ids,
@@ -1065,7 +1065,7 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
 
         if not return_dict:
             return (sequence_output,
-                    pooled_output) + encoder_outputs[1:] + (orignal_embeds, )
+                    pooled_output) + encoder_outputs[1:] + (original_embeds, )
 
         return AttentionBackboneModelOutputWithEmbedding(
             last_hidden_state=sequence_output,
@@ -1074,4 +1074,4 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
             hidden_states=encoder_outputs.hidden_states,
             attentions=encoder_outputs.attentions,
             cross_attentions=encoder_outputs.cross_attentions,
-            embedding_output=orignal_embeds)
+            embedding_output=original_embeds)
diff --git a/modelscope/models/nlp/structbert/backbone.py b/modelscope/models/nlp/structbert/backbone.py
index 58d324a8..d1998e98 100755
--- a/modelscope/models/nlp/structbert/backbone.py
+++ b/modelscope/models/nlp/structbert/backbone.py
@@ -881,7 +881,7 @@ class SbertModel(SbertPreTrainedModel):
         head_mask = self.get_head_mask(head_mask,
                                        self.config.num_hidden_layers)
 
-        embedding_output, orignal_embeds = self.embeddings(
+        embedding_output, original_embeds = self.embeddings(
             input_ids=input_ids,
             position_ids=position_ids,
             token_type_ids=token_type_ids,
@@ -907,7 +907,7 @@ class SbertModel(SbertPreTrainedModel):
 
         if not return_dict:
             return (sequence_output,
-                    pooled_output) + encoder_outputs[1:] + (orignal_embeds, )
+                    pooled_output) + encoder_outputs[1:] + (original_embeds, )
 
         return AttentionBackboneModelOutputWithEmbedding(
             last_hidden_state=sequence_output,
@@ -916,4 +916,4 @@ class SbertModel(SbertPreTrainedModel):
             hidden_states=encoder_outputs.hidden_states,
             attentions=encoder_outputs.attentions,
             cross_attentions=encoder_outputs.cross_attentions,
-            embedding_output=orignal_embeds)
+            embedding_output=original_embeds)
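
Note: the 'vanilla' --gan_mode named in the Pix2PixModel docstring touched by
this patch is the sigmoid cross-entropy objective from the original GAN paper
(Goodfellow et al., 2014). For reference, a minimal PyTorch sketch of that
objective follows; it is illustrative only, not the loss class this repo
ships, and the function names here are hypothetical:

import torch
import torch.nn as nn

# Vanilla GAN loss: binary cross-entropy on raw discriminator logits.
bce = nn.BCEWithLogitsLoss()

def vanilla_d_loss(real_logits: torch.Tensor,
                   fake_logits: torch.Tensor) -> torch.Tensor:
    # Discriminator: classify real patches as 1 and generated patches as 0.
    real = bce(real_logits, torch.ones_like(real_logits))
    fake = bce(fake_logits, torch.zeros_like(fake_logits))
    return 0.5 * (real + fake)

def vanilla_g_loss(fake_logits: torch.Tensor) -> torch.Tensor:
    # Generator (non-saturating form): drive D's output on fakes toward 1.
    return bce(fake_logits, torch.ones_like(fake_logits))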