Fix word orignal (#750)

co63oc
2024-02-21 14:53:33 +08:00
committed by GitHub
parent 473bf33705
commit b037e9caf0
4 changed files with 8 additions and 8 deletions


@@ -13,7 +13,7 @@ class Pix2PixModel(nn.Module):
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
-and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
+and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""


@@ -41,7 +41,7 @@ class Mlp(nn.Module):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
-# commit this for the orignal BERT implement
+# commit this for the original BERT implement
x = self.fc2(x)
x = self.drop(x)
return x
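
The Mlp.forward hunk above keeps the first dropout commented out to match the original BERT implementation. A self-contained sketch of that block follows; the GELU activation and the layer sizes are assumptions, only the attribute names come from the diff.

import torch
import torch.nn as nn

class Mlp(nn.Module):
    # Sketch of the MLP block from the hunk above; activation and sizes are assumptions.
    def __init__(self, in_features: int, hidden_features: int, drop: float = 0.1):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden_features, in_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.fc1(x)
        x = self.act(x)
        # x = self.drop(x)  # kept disabled to match the original BERT implementation
        x = self.fc2(x)
        x = self.drop(x)
        return x

out = Mlp(768, 3072)(torch.randn(2, 16, 768))  # example call with BERT-base sizes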


@@ -1031,7 +1031,7 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
head_mask = self.get_head_mask(head_mask,
self.config.num_hidden_layers)
-embedding_output, orignal_embeds = self.embeddings(
+embedding_output, original_embeds = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
@@ -1065,7 +1065,7 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
if not return_dict:
return (sequence_output,
-pooled_output) + encoder_outputs[1:] + (orignal_embeds, )
+pooled_output) + encoder_outputs[1:] + (original_embeds, )
return AttentionBackboneModelOutputWithEmbedding(
last_hidden_state=sequence_output,
@@ -1074,4 +1074,4 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
-embedding_output=orignal_embeds)
+embedding_output=original_embeds)


@@ -881,7 +881,7 @@ class SbertModel(SbertPreTrainedModel):
head_mask = self.get_head_mask(head_mask,
self.config.num_hidden_layers)
-embedding_output, orignal_embeds = self.embeddings(
+embedding_output, original_embeds = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
@@ -907,7 +907,7 @@ class SbertModel(SbertPreTrainedModel):
if not return_dict:
return (sequence_output,
-pooled_output) + encoder_outputs[1:] + (orignal_embeds, )
+pooled_output) + encoder_outputs[1:] + (original_embeds, )
return AttentionBackboneModelOutputWithEmbedding(
last_hidden_state=sequence_output,
@@ -916,4 +916,4 @@ class SbertModel(SbertPreTrainedModel):
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
-embedding_output=orignal_embeds)
+embedding_output=original_embeds)
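
Both backbone hunks above follow the same pattern: self.embeddings() returns (embedding_output, original_embeds), and original_embeds is either appended as the last element of the plain tuple (return_dict=False) or exposed as the embedding_output field of the output object. Below is a hedged, self-contained sketch of the two return styles; the stand-in tensors and the simplified output class are assumptions, only the field names mirror the diff.

from dataclasses import dataclass
import torch

@dataclass
class AttentionBackboneModelOutputWithEmbedding:
    # Simplified stand-in for the real output class; only two fields are shown.
    last_hidden_state: torch.Tensor
    embedding_output: torch.Tensor

def forward(return_dict: bool):
    sequence_output = torch.randn(1, 8, 768)   # stand-in encoder output
    original_embeds = torch.randn(1, 8, 768)   # stand-in input embeddings
    if not return_dict:
        # original_embeds goes last in the tuple, as in the hunks above
        return (sequence_output, original_embeds)
    return AttentionBackboneModelOutputWithEmbedding(
        last_hidden_state=sequence_output,
        embedding_output=original_embeds)

embeds_dict = forward(return_dict=True).embedding_output
embeds_tuple = forward(return_dict=False)[-1]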