Mirror of https://github.com/modelscope/modelscope.git (synced 2025-12-16 16:27:45 +01:00)
Fix word orignal (#750)
@@ -13,7 +13,7 @@ class Pix2PixModel(nn.Module):
     The model training requires '--dataset_mode aligned' dataset.
     By default, it uses a '--netG unet256' U-Net generator,
     a '--netD basic' discriminator (PatchGAN),
-    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
+    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
 
     pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
     """
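The docstring above refers to the '--gan_mode vanilla' objective, i.e. the cross-entropy loss from the original GAN paper, applied to the patch logits of the PatchGAN discriminator. A minimal sketch of that loss in PyTorch; the helper name and tensor shapes are illustrative, not modelscope's API:

# Minimal sketch of the 'vanilla' GAN objective (binary cross-entropy on logits).
# vanilla_gan_loss is a hypothetical helper for illustration only.
import torch
import torch.nn as nn


def vanilla_gan_loss(disc_logits: torch.Tensor, target_is_real: bool) -> torch.Tensor:
    """Cross-entropy against an all-real or all-fake target map."""
    target = torch.ones_like(disc_logits) if target_is_real else torch.zeros_like(disc_logits)
    return nn.functional.binary_cross_entropy_with_logits(disc_logits, target)


# Example: a PatchGAN discriminator emits a grid of patch logits per image.
fake_logits = torch.randn(4, 1, 30, 30)
g_loss = vanilla_gan_loss(fake_logits, target_is_real=True)        # generator wants "real"
d_loss_fake = vanilla_gan_loss(fake_logits, target_is_real=False)  # discriminator wants "fake"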
@@ -41,7 +41,7 @@ class Mlp(nn.Module):
         x = self.fc1(x)
         x = self.act(x)
         # x = self.drop(x)
-        # commit this for the orignal BERT implement
+        # commit this for the original BERT implement
         x = self.fc2(x)
         x = self.drop(x)
         return x
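The hunk above is the forward pass of a standard two-layer transformer MLP block, with the first dropout commented out to follow the original BERT implementation. A minimal sketch of what the surrounding class plausibly looks like; this is an assumption for illustration, not the exact modelscope source:

import torch.nn as nn


class Mlp(nn.Module):
    """Feed-forward block: fc1 -> activation -> fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None,
                 act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        # x = self.drop(x)  # skipped to match the original BERT implementation
        x = self.fc2(x)
        x = self.drop(x)
        return x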
@@ -1031,7 +1031,7 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
         head_mask = self.get_head_mask(head_mask,
                                        self.config.num_hidden_layers)
 
-        embedding_output, orignal_embeds = self.embeddings(
+        embedding_output, original_embeds = self.embeddings(
             input_ids=input_ids,
             position_ids=position_ids,
             token_type_ids=token_type_ids,
@@ -1065,7 +1065,7 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
 
         if not return_dict:
             return (sequence_output,
-                    pooled_output) + encoder_outputs[1:] + (orignal_embeds, )
+                    pooled_output) + encoder_outputs[1:] + (original_embeds, )
 
         return AttentionBackboneModelOutputWithEmbedding(
             last_hidden_state=sequence_output,
@@ -1074,4 +1074,4 @@ class PlugMentalModel(PlugMentalPreTrainedModel):
             hidden_states=encoder_outputs.hidden_states,
             attentions=encoder_outputs.attentions,
             cross_attentions=encoder_outputs.cross_attentions,
-            embedding_output=orignal_embeds)
+            embedding_output=original_embeds)
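The hunks above show the two return paths of PlugMentalModel.forward: a plain tuple when return_dict is False, and an output object that additionally carries the pre-encoder embeddings (original_embeds) as embedding_output. A rough sketch of such a container, with fields inferred from the diff; the real AttentionBackboneModelOutputWithEmbedding in modelscope may differ:

from dataclasses import dataclass
from typing import Optional, Tuple
import torch


@dataclass
class AttentionBackboneModelOutputWithEmbedding:
    """Illustrative only; fields inferred from the diff, not the real class."""
    last_hidden_state: Optional[torch.Tensor] = None
    pooler_output: Optional[torch.Tensor] = None        # assumed from the tuple path's pooled_output
    hidden_states: Optional[Tuple[torch.Tensor, ...]] = None
    attentions: Optional[Tuple[torch.Tensor, ...]] = None
    cross_attentions: Optional[Tuple[torch.Tensor, ...]] = None
    embedding_output: Optional[torch.Tensor] = None     # the renamed original_embeds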
@@ -881,7 +881,7 @@ class SbertModel(SbertPreTrainedModel):
         head_mask = self.get_head_mask(head_mask,
                                        self.config.num_hidden_layers)
 
-        embedding_output, orignal_embeds = self.embeddings(
+        embedding_output, original_embeds = self.embeddings(
             input_ids=input_ids,
             position_ids=position_ids,
             token_type_ids=token_type_ids,
@@ -907,7 +907,7 @@ class SbertModel(SbertPreTrainedModel):
 
         if not return_dict:
             return (sequence_output,
-                    pooled_output) + encoder_outputs[1:] + (orignal_embeds, )
+                    pooled_output) + encoder_outputs[1:] + (original_embeds, )
 
         return AttentionBackboneModelOutputWithEmbedding(
             last_hidden_state=sequence_output,
@@ -916,4 +916,4 @@ class SbertModel(SbertPreTrainedModel):
             hidden_states=encoder_outputs.hidden_states,
             attentions=encoder_outputs.attentions,
             cross_attentions=encoder_outputs.cross_attentions,
-            embedding_output=orignal_embeds)
+            embedding_output=original_embeds)
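SbertModel gets the identical rename. In the non-dict path the renamed original_embeds is appended as the last element of the returned tuple; a self-contained toy illustration of that layout, where every tensor is a stand-in and only the tuple construction mirrors the diff:

# Toy illustration of how the non-dict return is assembled and indexed.
# Shapes and values are stand-ins; only the tuple layout follows the diff.
import torch

sequence_output = torch.zeros(1, 16, 768)   # last hidden state
pooled_output = torch.zeros(1, 768)         # pooled representation
encoder_outputs = (sequence_output, (torch.zeros(1, 16, 768),))  # [0] = hidden, [1:] = extras
original_embeds = torch.zeros(1, 16, 768)   # embeddings before the encoder

outputs = (sequence_output, pooled_output) + encoder_outputs[1:] + (original_embeds, )

assert outputs[-1] is original_embeds       # the embeddings always land last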