mirror of https://github.com/modelscope/modelscope.git

pre-commit lint
@@ -5,13 +5,7 @@ from .audio.tts.vocoder import Hifigan16k
 from .base import Model
 from .builder import MODELS, build_model
 from .multi_model import OfaForImageCaptioning
-from .nlp import (
-    BertForSequenceClassification,
-    SbertForNLI,
-    SbertForSentenceSimilarity,
-    SbertForSentimentClassification,
-    SbertForZeroShotClassification,
-    StructBertForMaskedLM,
-    VecoForMaskedLM,
-    SbertForTokenClassification,
-)
+from .nlp import (BertForSequenceClassification, SbertForNLI,
+                  SbertForSentenceSimilarity, SbertForSentimentClassification,
+                  SbertForTokenClassification, SbertForZeroShotClassification,
+                  StructBertForMaskedLM, VecoForMaskedLM)
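The four added lines are the alphabetized, parenthesized form that the lint hooks emit. isort exposes a string-level API that previews this kind of normalization; a minimal sketch, assuming isort >= 5 is installed (the exact wrap style depends on the project's isort settings):

import isort

# A deliberately unsorted snippet, similar in spirit to the block fixed above.
messy = 'import sys\nimport os\n'

# isort.code() returns the re-sorted source as a string:
# 'import os\nimport sys\n'
print(isort.code(messy))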
@@ -1,10 +1,10 @@
 from .bert_for_sequence_classification import *  # noqa F403
 from .masked_language_model import *  # noqa F403
-from .sbert_for_nli import *  # noqa F403
 from .palm_for_text_generation import *  # noqa F403
+from .sbert_for_nli import *  # noqa F403
 from .sbert_for_sentence_similarity import *  # noqa F403
-from .sbert_for_token_classification import *  # noqa F403
 from .sbert_for_sentiment_classification import *  # noqa F403
+from .sbert_for_token_classification import *  # noqa F403
+from .sbert_for_zero_shot_classification import *  # noqa F403
 from .space.dialog_intent_prediction_model import *  # noqa F403
 from .space.dialog_modeling_model import *  # noqa F403
-from .sbert_for_zero_shot_classification import *  # noqa F403
@@ -2,10 +2,10 @@ from typing import Any, Dict, Optional, Union

 import numpy as np

+from ...metainfo import Models
 from ...utils.constant import Tasks
 from ..base import Model, Tensor
 from ..builder import MODELS
-from ...metainfo import Models

 __all__ = ['StructBertForMaskedLM', 'VecoForMaskedLM', 'MaskedLMModelBase']

@@ -27,7 +27,7 @@ class MaskedLMModelBase(Model):

     @property
     def config(self):
-        if hasattr(self.model, "config"):
+        if hasattr(self.model, 'config'):
             return self.model.config
         return None

@@ -1,7 +1,8 @@
-from ...utils.constant import Tasks
-from .sbert_for_sequence_classification import SbertForSequenceClassificationBase
-from ..builder import MODELS
 from ...metainfo import Models
+from ...utils.constant import Tasks
+from ..builder import MODELS
+from .sbert_for_sequence_classification import \
+    SbertForSequenceClassificationBase

 __all__ = ['SbertForNLI']

@@ -17,5 +18,6 @@ class SbertForNLI(SbertForSequenceClassificationBase):
             model_cls (Optional[Any], optional): model loader, if None, use the
                 default loader to load model weights, by default None.
         """
-        super().__init__(model_dir, *args, model_args={"num_labels": 3}, **kwargs)
+        super().__init__(
+            model_dir, *args, model_args={'num_labels': 3}, **kwargs)
         assert self.model.config.num_labels == 3
@@ -1,7 +1,8 @@
 from modelscope.metainfo import Models
 from modelscope.utils.constant import Tasks
-from .sbert_for_sequence_classification import SbertForSequenceClassificationBase
 from ..builder import MODELS
+from .sbert_for_sequence_classification import \
+    SbertForSequenceClassificationBase

 __all__ = ['SbertForSentenceSimilarity']

@@ -18,6 +19,7 @@ class SbertForSentenceSimilarity(SbertForSequenceClassificationBase):
             model_cls (Optional[Any], optional): model loader, if None, use the
                 default loader to load model weights, by default None.
         """
-        super().__init__(model_dir, *args, model_args={"num_labels": 2}, **kwargs)
+        super().__init__(
+            model_dir, *args, model_args={'num_labels': 2}, **kwargs)
         self.model_dir = model_dir
         assert self.model.config.num_labels == 2
@@ -1,14 +1,14 @@
-from modelscope.utils.constant import Tasks
-from .sbert_for_sequence_classification import SbertForSequenceClassificationBase
-from ..builder import MODELS
 from modelscope.metainfo import Models
+from modelscope.utils.constant import Tasks
+from ..builder import MODELS
+from .sbert_for_sequence_classification import \
+    SbertForSequenceClassificationBase

 __all__ = ['SbertForSentimentClassification']


 @MODELS.register_module(
-    Tasks.sentiment_classification,
-    module_name=Models.structbert)
+    Tasks.sentiment_classification, module_name=Models.structbert)
 class SbertForSentimentClassification(SbertForSequenceClassificationBase):

     def __init__(self, model_dir: str, *args, **kwargs):
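@MODELS.register_module(Tasks.sentiment_classification, module_name=Models.structbert) is modelscope's registry pattern: the decorator files the class under a (task, module name) key so a builder can construct it later from configuration. A minimal sketch of the idea follows; this Registry class and its names are illustrative, not modelscope's actual implementation.

class Registry:
    """Illustrative stand-in for modelscope's MODELS/PIPELINES registries."""

    def __init__(self, name):
        self.name = name
        self._modules = {}  # (task, module_name) -> class

    def register_module(self, task, module_name):

        def decorator(cls):
            self._modules[(task, module_name)] = cls
            return cls

        return decorator

    def get(self, task, module_name):
        return self._modules[(task, module_name)]


MODELS = Registry('models')


@MODELS.register_module('sentiment-classification', module_name='structbert')
class DummyModel:
    pass


assert MODELS.get('sentiment-classification', 'structbert') is DummyModel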
@@ -19,5 +19,6 @@ class SbertForSentimentClassification(SbertForSequenceClassificationBase):
             model_cls (Optional[Any], optional): model loader, if None, use the
                 default loader to load model weights, by default None.
         """
-        super().__init__(model_dir, *args, model_args={"num_labels": 2}, **kwargs)
+        super().__init__(
+            model_dir, *args, model_args={'num_labels': 2}, **kwargs)
         assert self.model.config.num_labels == 2
@@ -1,11 +1,13 @@
-from torch import nn
-from typing import Any, Dict
-from ..base import Model
-import numpy as np
-import json
 import os
+from typing import Any, Dict
+
+import json
+import numpy as np
 import torch
-from sofa.models.sbert.modeling_sbert import SbertPreTrainedModel, SbertModel
+from sofa.models.sbert.modeling_sbert import SbertModel, SbertPreTrainedModel
+from torch import nn
+
+from ..base import Model


 class SbertTextClassfier(SbertPreTrainedModel):
@@ -27,9 +29,7 @@ class SbertTextClassfier(SbertPreTrainedModel):
         pooled_output = outputs[1]
         pooled_output = self.dropout(pooled_output)
         logits = self.classifier(pooled_output)
-        return {
-            "logits": logits
-        }
+        return {'logits': logits}


 class SbertForSequenceClassificationBase(Model):
@@ -38,13 +38,17 @@ class SbertForSequenceClassificationBase(Model):
         super().__init__(model_dir, *args, **kwargs)
         if model_args is None:
             model_args = {}
-        self.model = SbertTextClassfier.from_pretrained(model_dir, **model_args)
+        self.model = SbertTextClassfier.from_pretrained(
+            model_dir, **model_args)
         self.id2label = {}
         self.label_path = os.path.join(self.model_dir, 'label_mapping.json')
         if os.path.exists(self.label_path):
             with open(self.label_path) as f:
                 self.label_mapping = json.load(f)
-            self.id2label = {idx: name for name, idx in self.label_mapping.items()}
+            self.id2label = {
+                idx: name
+                for name, idx in self.label_mapping.items()
+            }

     def train(self):
         return self.model.train()
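The reflowed comprehension above inverts the name-to-id table loaded from label_mapping.json into an id-to-name table used at postprocess time. A self-contained example of the same inversion (the label names here are invented for illustration):

# Sketch: invert a name->id label mapping into id->name, as the
# comprehension above does.
label_mapping = {'negative': 0, 'positive': 1}

id2label = {idx: name for name, idx in label_mapping.items()}

assert id2label == {0: 'negative', 1: 'positive'}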
@@ -59,7 +63,7 @@ class SbertForSequenceClassificationBase(Model):
         return self.model.forward(input_ids, token_type_ids)

     def postprocess(self, input, **kwargs):
-        logits = input["logits"]
+        logits = input['logits']
         probs = logits.softmax(-1).numpy()
         pred = logits.argmax(-1).numpy()
         logits = logits.numpy()
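postprocess converts raw logits into probabilities and a predicted class id with exactly these tensor calls. A standalone numeric example:

# Standalone example of the postprocess math above: softmax for
# probabilities, argmax for the predicted label id.
import torch

logits = torch.tensor([[-0.3, 1.2]])  # example batch of one

probs = logits.softmax(-1).numpy()  # approx. [[0.18, 0.82]]
pred = logits.argmax(-1).numpy()    # [1]

assert pred[0] == 1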
@@ -34,7 +34,7 @@ class SbertForTokenClassification(Model):

     def eval(self):
         return self.model.eval()

-    def forward(self, input: Dict[str, Any]) -> Dict[str, Union[str, np.ndarray]]:
+    def forward(self, input: Dict[str,
+                                  Any]) -> Dict[str, Union[str, np.ndarray]]:
         """return the result by the model
@@ -54,8 +54,9 @@ class SbertForTokenClassification(Model):
         input_ids = torch.tensor(input['input_ids']).unsqueeze(0)
         return {**self.model(input_ids), 'text': input['text']}

-    def postprocess(self, input: Dict[str, Tensor], **kwargs) -> Dict[str, Tensor]:
-        logits = input["logits"]
+    def postprocess(self, input: Dict[str, Tensor],
+                    **kwargs) -> Dict[str, Tensor]:
+        logits = input['logits']
         pred = torch.argmax(logits[0], dim=-1)
         pred = pred.numpy()
         rst = {'predictions': pred, 'logits': logits, 'text': input['text']}
@@ -3,16 +3,15 @@ from typing import Any, Dict
 import numpy as np

 from modelscope.utils.constant import Tasks
+from ...metainfo import Models
 from ..base import Model
 from ..builder import MODELS
-from ...metainfo import Models

 __all__ = ['SbertForZeroShotClassification']


 @MODELS.register_module(
-    Tasks.zero_shot_classification,
-    module_name=Models.structbert)
+    Tasks.zero_shot_classification, module_name=Models.structbert)
 class SbertForZeroShotClassification(Model):

     def __init__(self, model_dir: str, *args, **kwargs):
@@ -31,7 +30,7 @@ class SbertForZeroShotClassification(Model):

     def eval(self):
         return self.model.eval()

-
     def forward(self, input: Dict[str, Any]) -> Dict[str, np.ndarray]:
         """return the result by the model

@@ -3,8 +3,7 @@ IntentUnifiedTransformer
 """
 import torch

-from .unified_transformer import \
-    UnifiedTransformer
+from .unified_transformer import UnifiedTransformer


 class GenUnifiedTransformer(UnifiedTransformer):
@@ -7,10 +7,9 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F

-from .model_base import ModelBase
 from ..modules.embedder import Embedder
-from ..modules.transformer_block import \
-    TransformerBlock
+from ..modules.transformer_block import TransformerBlock
+from .model_base import ModelBase


 class UnifiedTransformer(ModelBase):
@@ -6,8 +6,7 @@ import torch
 import torch.nn as nn

 from .feedforward import FeedForward
-from .multihead_attention import \
-    MultiheadAttention
+from .multihead_attention import MultiheadAttention


 class TransformerBlock(nn.Module):
@@ -1,15 +1,14 @@
-from typing import Dict, Optional, Union, Any
+from typing import Any, Dict, Optional, Union

 import torch

+from ...metainfo import Pipelines
 from ...models import Model
-from ...models.nlp.masked_language_model import \
-    MaskedLMModelBase
+from ...models.nlp.masked_language_model import MaskedLMModelBase
 from ...preprocessors import FillMaskPreprocessor
 from ...utils.constant import Tasks
 from ..base import Pipeline, Tensor
 from ..builder import PIPELINES
-from ...metainfo import Pipelines

 __all__ = ['FillMaskPipeline']

@@ -20,7 +19,7 @@ class FillMaskPipeline(Pipeline):

     def __init__(self,
                  model: Union[MaskedLMModelBase, str],
                  preprocessor: Optional[FillMaskPreprocessor] = None,
-                 first_sequence="sentense",
+                 first_sequence='sentense',
                  **kwargs):
         """use `model` and `preprocessor` to create a nlp fill mask pipeline for prediction
@@ -38,7 +37,8 @@ class FillMaskPipeline(Pipeline):
                 first_sequence=first_sequence,
                 second_sequence=None)
         fill_mask_model.eval()
-        super().__init__(model=fill_mask_model, preprocessor=preprocessor, **kwargs)
+        super().__init__(
+            model=fill_mask_model, preprocessor=preprocessor, **kwargs)
         self.preprocessor = preprocessor
         self.tokenizer = preprocessor.tokenizer
         self.mask_id = {'veco': 250001, 'sbert': 103}
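The hard-coded mask_id table maps a backbone name to its mask-token id (250001 matches an XLM-R-style vocabulary for veco, 103 the BERT [MASK] id for sbert). This hunk does not show how the table is consumed; one plausible use, sketched here with invented input ids, is locating the masked positions in a batch:

# Hedged sketch: one plausible way a fill-mask pipeline could use a
# per-backbone mask-token-id table like {'veco': 250001, 'sbert': 103}.
# The input ids below are invented for illustration.
import torch

mask_id = {'veco': 250001, 'sbert': 103}

input_ids = torch.tensor([[101, 2023, 103, 2003, 103, 102]])  # BERT-style ids
positions = (input_ids == mask_id['sbert']).nonzero(as_tuple=True)[1]

print(positions.tolist())  # [2, 4]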
@@ -1,31 +1,28 @@
 import uuid
 from typing import Any, Dict, Union
-import torch

 import numpy as np
+import torch

-from ..base import Pipeline
-from ..builder import PIPELINES
 from ...metainfo import Pipelines
 from ...models import Model
 from ...models.nlp import SbertForNLI
 from ...preprocessors import NLIPreprocessor
 from ...utils.constant import Tasks
+from ..base import Pipeline
+from ..builder import PIPELINES

 __all__ = ['NLIPipeline']


-@PIPELINES.register_module(
-    Tasks.nli, module_name=Pipelines.nli)
+@PIPELINES.register_module(Tasks.nli, module_name=Pipelines.nli)
 class NLIPipeline(Pipeline):

     def __init__(self,
                  model: Union[SbertForNLI, str],
                  preprocessor: NLIPreprocessor = None,
-                 first_sequence="first_sequence",
-                 second_sequence="second_sequence",
+                 first_sequence='first_sequence',
+                 second_sequence='second_sequence',
                  **kwargs):
         """use `model` and `preprocessor` to create a nlp text classification pipeline for prediction
@@ -51,7 +48,8 @@ class NLIPipeline(Pipeline):
         with torch.no_grad():
             return super().forward(inputs, **forward_params)

-    def postprocess(self, inputs: Dict[str, Any], **postprocess_params) -> Dict[str, str]:
+    def postprocess(self, inputs: Dict[str, Any],
+                    **postprocess_params) -> Dict[str, str]:
         """process the prediction results

         Args:
@@ -2,11 +2,12 @@ from typing import Any, Dict, Union

 import numpy as np
 import torch

+from ...metainfo import Pipelines
+from ...models import Model
 from ...models.nlp import SbertForSentenceSimilarity
 from ...preprocessors import SequenceClassificationPreprocessor
 from ...utils.constant import Tasks
-from ...models import Model
 from ..base import Input, Pipeline
 from ..builder import PIPELINES

@@ -20,8 +21,8 @@ class SentenceSimilarityPipeline(Pipeline):

     def __init__(self,
                  model: Union[Model, str],
                  preprocessor: SequenceClassificationPreprocessor = None,
-                 first_sequence="first_sequence",
-                 second_sequence="second_sequence",
+                 first_sequence='first_sequence',
+                 second_sequence='second_sequence',
                  **kwargs):
         """use `model` and `preprocessor` to create a nlp sentence similarity pipeline for prediction
@@ -50,7 +51,8 @@ class SentenceSimilarityPipeline(Pipeline):
         with torch.no_grad():
             return super().forward(inputs, **forward_params)

-    def postprocess(self, inputs: Dict[str, Any], **postprocess_params) -> Dict[str, str]:
+    def postprocess(self, inputs: Dict[str, Any],
+                    **postprocess_params) -> Dict[str, str]:
         """process the prediction results

         Args:
@@ -1,17 +1,18 @@
 import os
 import uuid
 from typing import Any, Dict, Union
-import torch

 import json
 import numpy as np
+import torch
+
+from ...metainfo import Pipelines
+from ...models import Model
 from ...models.nlp import SbertForSentimentClassification
 from ...preprocessors import SentimentClassificationPreprocessor
 from ...utils.constant import Tasks
-from ...models import Model
 from ..base import Input, Pipeline
 from ..builder import PIPELINES
-from ...metainfo import Pipelines

 __all__ = ['SentimentClassificationPipeline']

@@ -24,8 +25,8 @@ class SentimentClassificationPipeline(Pipeline):

     def __init__(self,
                  model: Union[SbertForSentimentClassification, str],
                  preprocessor: SentimentClassificationPreprocessor = None,
-                 first_sequence="first_sequence",
-                 second_sequence="second_sequence",
+                 first_sequence='first_sequence',
+                 second_sequence='second_sequence',
                  **kwargs):
         """use `model` and `preprocessor` to create a nlp text classification pipeline for prediction
@@ -52,7 +53,8 @@ class SentimentClassificationPipeline(Pipeline):
         with torch.no_grad():
             return super().forward(inputs, **forward_params)

-    def postprocess(self, inputs: Dict[str, Any], **postprocess_params) -> Dict[str, str]:
+    def postprocess(self, inputs: Dict[str, Any],
+                    **postprocess_params) -> Dict[str, str]:
         """process the prediction results

         Args:
@@ -1,5 +1,7 @@
-from typing import Dict, Optional, Union, Any
+from typing import Any, Dict, Optional, Union
+
 import torch

+from ...metainfo import Pipelines
 from ...models import Model
 from ...models.nlp import PalmForTextGeneration
@@ -42,7 +44,8 @@ class TextGenerationPipeline(Pipeline):
         with torch.no_grad():
             return super().forward(inputs, **forward_params)

-    def postprocess(self, inputs: Dict[str, Tensor], **postprocess_params) -> Dict[str, str]:
+    def postprocess(self, inputs: Dict[str, Tensor],
+                    **postprocess_params) -> Dict[str, str]:
         """process the prediction results

         Args:
@@ -1,5 +1,7 @@
 from typing import Any, Dict, Optional, Union
+
 import torch

+from ...metainfo import Pipelines
 from ...models import Model
 from ...models.nlp import SbertForTokenClassification
@@ -42,7 +44,8 @@ class WordSegmentationPipeline(Pipeline):
         with torch.no_grad():
             return super().forward(inputs, **forward_params)

-    def postprocess(self, inputs: Dict[str, Any], **postprocess_params) -> Dict[str, str]:
+    def postprocess(self, inputs: Dict[str, Any],
+                    **postprocess_params) -> Dict[str, str]:
         """process the prediction results

         Args:
@@ -1,16 +1,17 @@
 import os
 import uuid
 from typing import Any, Dict, Union
-import torch

 import json
 import numpy as np
+import torch
 from scipy.special import softmax
+
+from ...metainfo import Pipelines
+from ...models import Model
 from ...models.nlp import SbertForZeroShotClassification
 from ...preprocessors import ZeroShotClassificationPreprocessor
 from ...utils.constant import Tasks
-from ...models import Model
-from ...metainfo import Pipelines
 from ..base import Input, Pipeline
 from ..builder import PIPELINES

@@ -5,8 +5,7 @@ from typing import Any, Dict, Union

 from transformers import AutoTokenizer

-from ..metainfo import Preprocessors
-from ..metainfo import Models
+from ..metainfo import Models, Preprocessors
 from ..utils.constant import Fields, InputFields
 from ..utils.type_assert import type_assert
 from .base import Preprocessor
@@ -3,13 +3,12 @@
 import os
 from typing import Any, Dict

-from .fields.intent_field import \
-    IntentBPETextField
 from ...utils.config import Config
 from ...utils.constant import Fields
 from ...utils.type_assert import type_assert
 from ..base import Preprocessor
 from ..builder import PREPROCESSORS
+from .fields.intent_field import IntentBPETextField

 __all__ = ['DialogIntentPredictionPreprocessor']

@@ -3,13 +3,12 @@
 import os
 from typing import Any, Dict

-from .fields.gen_field import \
-    MultiWOZBPETextField
-from ..base import Preprocessor
-from ..builder import PREPROCESSORS
 from ...utils.config import Config
 from ...utils.constant import Fields
 from ...utils.type_assert import type_assert
+from ..base import Preprocessor
+from ..builder import PREPROCESSORS
+from .fields.gen_field import MultiWOZBPETextField

 __all__ = ['DialogModelingPreprocessor']

@@ -8,10 +8,10 @@ from itertools import chain

 import numpy as np

-from ..tokenizer import Tokenizer
 from ....utils.nlp.space import ontology, utils
 from ....utils.nlp.space.db_ops import MultiWozDB
 from ....utils.nlp.space.utils import list2np
+from ..tokenizer import Tokenizer


 class BPETextField(object):
@@ -14,10 +14,10 @@ import json
 import numpy as np
 from tqdm import tqdm

-from ..tokenizer import Tokenizer
 from ....utils.nlp.space import ontology, utils
 from ....utils.nlp.space.scores import hierarchical_set_score
 from ....utils.nlp.space.utils import list2np
+from ..tokenizer import Tokenizer


 class BPETextField(object):
@@ -14,8 +14,7 @@ import torch
 from tqdm import tqdm
 from transformers.optimization import AdamW, get_linear_schedule_with_warmup

-from ..metrics.metrics_tracker import \
-    MetricsTracker
+from ..metrics.metrics_tracker import MetricsTracker


 def get_logger(log_path, name='default'):