Mirror of https://github.com/modelscope/modelscope.git (synced 2025-12-25 04:29:22 +01:00)
Fix OFA compatibility issue with newer transformers

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/14317517

* fix OFA compatibility issue with newer transformers
* fix timm.layers -> timm.models.layers import compatibility issue
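Note on the timm change: the hunks below pin the helpers to the timm.models.layers namespace that older timm releases provide, while newer timm releases expose the same helpers under timm.layers. As a purely illustrative sketch (not part of this commit), an import guard that tolerates both layouts could look like this:

# Illustrative sketch only (not from this commit): tolerate both timm layouts.
try:
    # layout this commit pins to (present in older timm releases)
    from timm.models.layers.drop import drop_path
    from timm.models.layers.weight_init import trunc_normal_
except ImportError:
    # newer timm releases moved these helpers under timm.layers
    from timm.layers import drop_path, trunc_normal_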
@@ -9,8 +9,8 @@ import numpy as np
 import torch
 import torch.nn as nn
 from mmcv.cnn import ConvModule
-from timm.layers.drop import drop_path
-from timm.layers.weight_init import trunc_normal_
+from timm.models.layers.drop import drop_path
+from timm.models.layers.weight_init import trunc_normal_
 
 from .common import Upsample, resize
 
@@ -11,8 +11,8 @@ from collections import OrderedDict
 import torch
 import torch.nn.functional as F
 import torch.utils.checkpoint as checkpoint
-from timm.layers.drop import drop_path
-from timm.layers.weight_init import trunc_normal_
+from timm.models.layers.drop import drop_path
+from timm.models.layers.weight_init import trunc_normal_
 from torch import nn
 
 
@@ -8,8 +8,8 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from mmcv.cnn import ConvModule
-from timm.layers.drop import drop_path
-from timm.layers.weight_init import trunc_normal_
+from timm.models.layers.drop import drop_path
+from timm.models.layers.weight_init import trunc_normal_
 
 from .common import resize
 
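All three hunks above make the same substitution. If it is unclear which layout a given timm installation provides, a small standard-library check like the following (illustrative, not part of the commit) reports what is importable:

# Illustrative check (not part of the commit): report which timm import
# layout the current environment provides.
import importlib.util

def has_module(name):
    try:
        return importlib.util.find_spec(name) is not None
    except ModuleNotFoundError:
        return False

print('timm.models.layers available:', has_module('timm.models.layers'))  # older timm layout
print('timm.layers available:', has_module('timm.layers'))  # newer timm layout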
@@ -183,6 +183,12 @@ class OFATokenizerZH(PreTrainedTokenizer):
                  tokenize_chinese_chars=True,
                  strip_accents=None,
                  **kwargs):
+        if not os.path.isfile(vocab_file):
+            raise ValueError(
+                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
+                'model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
+            )
+        self.vocab = load_vocab(vocab_file)
         super().__init__(
             do_lower_case=do_lower_case,
             do_basic_tokenize=do_basic_tokenize,
@@ -199,12 +205,6 @@ class OFATokenizerZH(PreTrainedTokenizer):
             **kwargs,
         )
 
-        if not os.path.isfile(vocab_file):
-            raise ValueError(
-                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
-                'model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
-            )
-        self.vocab = load_vocab(vocab_file)
         self.ids_to_tokens = collections.OrderedDict([
             (ids, tok) for tok, ids in self.vocab.items()
         ])
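The two tokenizer hunks together move the vocabulary-file check and the load_vocab call so they run before super().__init__() instead of after it. The likely reason (hedged, not stated in the commit message) is that newer transformers versions already perform token-to-id lookups for special and added tokens inside PreTrainedTokenizer.__init__, so self.vocab must exist before the base class initializes. A minimal, self-contained sketch of the same ordering, with a hypothetical tokenizer class and toy one-token-per-line vocab format (not the ModelScope OFATokenizerZH code), could look like this:

# Minimal sketch (hypothetical ToyVocabTokenizer, not the ModelScope code):
# the vocab is built BEFORE super().__init__() because newer transformers
# may look tokens up during base-class initialization.
import collections
import os

from transformers import PreTrainedTokenizer


class ToyVocabTokenizer(PreTrainedTokenizer):

    def __init__(self, vocab_file, unk_token='[UNK]', **kwargs):
        if not os.path.isfile(vocab_file):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'.")
        # One token per line -> id by line number (assumed toy file format).
        self.vocab = collections.OrderedDict()
        with open(vocab_file, encoding='utf-8') as f:
            for index, token in enumerate(f):
                self.vocab[token.rstrip('\n')] = index
        # Only now hand control to the base class; any token-to-id lookups it
        # performs for special/added tokens already have a vocab to consult.
        super().__init__(unk_token=unk_token, **kwargs)
        # Reverse mapping built after base-class init, mirroring the commit.
        self.ids_to_tokens = collections.OrderedDict(
            (ids, tok) for tok, ids in self.vocab.items())

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab)

    def _tokenize(self, text):
        return text.split()

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get('[UNK]', 0))

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, '[UNK]')

Swapping the two statements back (calling super().__init__() before building self.vocab) is what breaks under newer transformers, since the base-class constructor would query a vocab attribute that does not exist yet.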