fix video_multi_modal_embedding

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10191199
This commit is contained in:
siyang.ssy
2022-09-20 20:01:16 +08:00
committed by wenmeng.zwm
parent 02f0f37134
commit d6942716c1
2 changed files with 4 additions and 4 deletions

View File

@@ -42,7 +42,10 @@ class VideoCLIPForMultiModalEmbedding(TorchModel):
self.max_frames = model_config['max_frames']
self.feature_framerate = model_config['feature_framerate']
self.image_resolution = 224
self.device = model_config['device']
if torch.cuda.is_available():
self.device = model_config['device']
else:
self.device = 'cpu'
self.init_model = f'{model_dir}/{ModelFile.TORCH_MODEL_BIN_FILE}'
self.tokenizer = ClipTokenizer(model_dir)

View File

@@ -85,9 +85,6 @@ class CLIP4Clip(nn.Module):
linear_patch=config['linear_patch'],
use_gc=config['use_gc']).float()
if (platform.system() != 'Darwin'):
convert_weights(self.clip) # fp16
if backbone in ['ViT-B/32', 'ViT-B/16']:
cross_config = SimpleNamespace(**{
'hidden_size': 512,