diff --git a/modelscope/models/cv/shop_segmentation/head_fpn.py b/modelscope/models/cv/shop_segmentation/head_fpn.py
index dfa284d4..0d4027cb 100644
--- a/modelscope/models/cv/shop_segmentation/head_fpn.py
+++ b/modelscope/models/cv/shop_segmentation/head_fpn.py
@@ -9,8 +9,8 @@
 import numpy as np
 import torch
 import torch.nn as nn
 from mmcv.cnn import ConvModule
-from timm.layers.drop import drop_path
-from timm.layers.weight_init import trunc_normal_
+from timm.models.layers.drop import drop_path
+from timm.models.layers.weight_init import trunc_normal_
 
 from .common import Upsample, resize
diff --git a/modelscope/models/cv/shop_segmentation/models.py b/modelscope/models/cv/shop_segmentation/models.py
index 1b07a08c..a206e9f1 100644
--- a/modelscope/models/cv/shop_segmentation/models.py
+++ b/modelscope/models/cv/shop_segmentation/models.py
@@ -11,8 +11,8 @@
 from collections import OrderedDict
 
 import torch
 import torch.nn.functional as F
 import torch.utils.checkpoint as checkpoint
-from timm.layers.drop import drop_path
-from timm.layers.weight_init import trunc_normal_
+from timm.models.layers.drop import drop_path
+from timm.models.layers.weight_init import trunc_normal_
 from torch import nn
diff --git a/modelscope/models/cv/shop_segmentation/neck_fpn.py b/modelscope/models/cv/shop_segmentation/neck_fpn.py
index 12c11d76..d344de71 100644
--- a/modelscope/models/cv/shop_segmentation/neck_fpn.py
+++ b/modelscope/models/cv/shop_segmentation/neck_fpn.py
@@ -8,8 +8,8 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from mmcv.cnn import ConvModule
-from timm.layers.drop import drop_path
-from timm.layers.weight_init import trunc_normal_
+from timm.models.layers.drop import drop_path
+from timm.models.layers.weight_init import trunc_normal_
 
 from .common import resize
 
diff --git a/modelscope/models/multi_modal/ofa/tokenization_ofa.py b/modelscope/models/multi_modal/ofa/tokenization_ofa.py
index 77de7a1d..ea79a327 100644
--- a/modelscope/models/multi_modal/ofa/tokenization_ofa.py
+++ b/modelscope/models/multi_modal/ofa/tokenization_ofa.py
@@ -183,6 +183,12 @@ class OFATokenizerZH(PreTrainedTokenizer):
                  tokenize_chinese_chars=True,
                  strip_accents=None,
                  **kwargs):
+        if not os.path.isfile(vocab_file):
+            raise ValueError(
+                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
+                'model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
+            )
+        self.vocab = load_vocab(vocab_file)
         super().__init__(
             do_lower_case=do_lower_case,
             do_basic_tokenize=do_basic_tokenize,
@@ -199,12 +205,6 @@ class OFATokenizerZH(PreTrainedTokenizer):
             **kwargs,
         )
 
-        if not os.path.isfile(vocab_file):
-            raise ValueError(
-                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
-                'model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
-            )
-        self.vocab = load_vocab(vocab_file)
         self.ids_to_tokens = collections.OrderedDict([
             (ids, tok) for tok, ids in self.vocab.items()
         ])