diff --git a/modelscope/models/audio/ans/conv_stft.py b/modelscope/models/audio/ans/conv_stft.py
index 4b393a4c..3d37f1aa 100644
--- a/modelscope/models/audio/ans/conv_stft.py
+++ b/modelscope/models/audio/ans/conv_stft.py
@@ -39,7 +39,7 @@ class ConvSTFT(nn.Module):
         super(ConvSTFT, self).__init__()
         if fft_len is None:
-            self.fft_len = np.int(2**np.ceil(np.log2(win_len)))
+            self.fft_len = int(2**np.ceil(np.log2(win_len)))
         else:
             self.fft_len = fft_len
@@ -78,7 +78,7 @@ class ConviSTFT(nn.Module):
                  fix=True):
         super(ConviSTFT, self).__init__()
         if fft_len is None:
-            self.fft_len = np.int(2**np.ceil(np.log2(win_len)))
+            self.fft_len = int(2**np.ceil(np.log2(win_len)))
         else:
             self.fft_len = fft_len
         kernel, window = init_kernels(
diff --git a/modelscope/models/cv/body_2d_keypoints/hrnet_v2.py b/modelscope/models/cv/body_2d_keypoints/hrnet_v2.py
index ebd69adb..19e426b2 100644
--- a/modelscope/models/cv/body_2d_keypoints/hrnet_v2.py
+++ b/modelscope/models/cv/body_2d_keypoints/hrnet_v2.py
@@ -72,7 +72,7 @@ class PoseHighResolutionNetV2(TorchModel):
         self.stage4, pre_stage_channels = self._make_stage(
             self.stage4_cfg, num_channels, multi_scale_output=True)
         """final four layers"""
-        last_inp_channels = np.int(np.sum(pre_stage_channels))
+        last_inp_channels = int(np.sum(pre_stage_channels))
         self.final_layer = nn.Sequential(
             nn.Conv2d(
                 in_channels=last_inp_channels,
diff --git a/modelscope/models/cv/cartoon/facelib/face_landmark.py b/modelscope/models/cv/cartoon/facelib/face_landmark.py
index 3b7cc1b9..3c53f3a6 100644
--- a/modelscope/models/cv/cartoon/facelib/face_landmark.py
+++ b/modelscope/models/cv/cartoon/facelib/face_landmark.py
@@ -81,7 +81,7 @@ class FaceLandmark:
         bbox[2] = center[0] + one_edge // 2
         bbox[3] = center[1] + one_edge // 2
 
-        bbox = bbox.astype(np.int)
+        bbox = bbox.astype(int)
         crop_image = bimg[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
         h, w, _ = crop_image.shape
         crop_image = cv2.resize(
diff --git a/modelscope/models/cv/crowd_counting/hrnet_aspp_relu.py b/modelscope/models/cv/crowd_counting/hrnet_aspp_relu.py
index 0d1bd3ca..64f40da0 100644
--- a/modelscope/models/cv/crowd_counting/hrnet_aspp_relu.py
+++ b/modelscope/models/cv/crowd_counting/hrnet_aspp_relu.py
@@ -356,7 +356,7 @@ class HighResolutionNet(nn.Module):
             num_channels)
         self.stage3, pre_stage_channels = self._make_stage(
             self.stage3_cfg, num_channels)
-        last_inp_channels = np.int(np.sum(pre_stage_channels)) + 256
+        last_inp_channels = int(np.sum(pre_stage_channels)) + 256
         self.redc_layer = nn.Sequential(
             nn.Conv2d(
                 in_channels=last_inp_channels,
diff --git a/modelscope/models/cv/face_detection/peppa_pig_face/face_landmark.py b/modelscope/models/cv/face_detection/peppa_pig_face/face_landmark.py
index 03a3b5b7..e7e2ddaf 100644
--- a/modelscope/models/cv/face_detection/peppa_pig_face/face_landmark.py
+++ b/modelscope/models/cv/face_detection/peppa_pig_face/face_landmark.py
@@ -82,7 +82,7 @@ class FaceLandmark:
         bbox[2] = center[0] + one_edge // 2
         bbox[3] = center[1] + one_edge // 2
 
-        bbox = bbox.astype(np.int)
+        bbox = bbox.astype(int)
         crop_image = bimg[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
         h, w, _ = crop_image.shape
         crop_image = cv2.resize(crop_image,
diff --git a/modelscope/models/cv/human_reconstruction/models/human_segmenter.py b/modelscope/models/cv/human_reconstruction/models/human_segmenter.py
index 3f0261e7..29bf6f70 100644
--- a/modelscope/models/cv/human_reconstruction/models/human_segmenter.py
+++ b/modelscope/models/cv/human_reconstruction/models/human_segmenter.py
@@ -31,7 +31,7 @@ class human_segmenter(object):
             img = np.dstack((img, img, img))
         elif img.shape[2] == 4:
             img = img[:, :, :3]
-        img = img.astype(np.float)
+        img = img.astype(float)
         return img
 
     def run(self, img):
diff --git a/modelscope/models/cv/human_reconstruction/utils.py b/modelscope/models/cv/human_reconstruction/utils.py
index 45653dc6..67e1efdb 100644
--- a/modelscope/models/cv/human_reconstruction/utils.py
+++ b/modelscope/models/cv/human_reconstruction/utils.py
@@ -69,8 +69,8 @@ def eval_grid(coords, num_samples=512 * 512 * 512):
     resolution = coords.shape[1:4]
     sdf = np.zeros(resolution)
 
-    dirty = np.ones(resolution, dtype=np.bool)
-    grid_mask = np.zeros(resolution, dtype=np.bool)
+    dirty = np.ones(resolution, dtype=bool)
+    grid_mask = np.zeros(resolution, dtype=bool)
 
     reso = resolution[0] // init_resolution
     while reso > 0:
diff --git a/modelscope/models/cv/image_defrcn_fewshot/utils/voc_register.py b/modelscope/models/cv/image_defrcn_fewshot/utils/voc_register.py
index 7a94066e..0b043493 100644
--- a/modelscope/models/cv/image_defrcn_fewshot/utils/voc_register.py
+++ b/modelscope/models/cv/image_defrcn_fewshot/utils/voc_register.py
@@ -163,7 +163,7 @@ def load_filtered_voc_instances(name: str, root: str, dirname: str, split: str,
                     os.path.join(split_dir,
                                  'box_{}shot_{}_train.txt'.format(
                                      shot, cls))) as f:
-                    fileids_ = np.loadtxt(f, dtype=np.str).tolist()
+                    fileids_ = np.loadtxt(f, dtype=np.str_).tolist()
                     if isinstance(fileids_, str):
                         fileids_ = [fileids_]
                     fileids_ = [
@@ -219,7 +219,7 @@ def load_filtered_voc_instances(name: str, root: str, dirname: str, split: str,
         with PathManager.open(
                 os.path.join(root, dirname, 'ImageSets', 'Main',
                              split + '.txt')) as f:
-            fileids = np.loadtxt(f, dtype=np.str)
+            fileids = np.loadtxt(f, dtype=np.str_)
 
         for fileid in fileids:
             anno_file = os.path.join(root, dirname, 'Annotations',
diff --git a/modelscope/models/cv/image_instance_segmentation/postprocess_utils.py b/modelscope/models/cv/image_instance_segmentation/postprocess_utils.py
index fdbb2fb0..aad7d8e9 100644
--- a/modelscope/models/cv/image_instance_segmentation/postprocess_utils.py
+++ b/modelscope/models/cv/image_instance_segmentation/postprocess_utils.py
@@ -108,16 +108,16 @@ def get_img_ins_seg_result(img_seg_result=None,
 
     for seg_result in img_seg_result:
         box = [
-            np.int(seg_result[0]),
-            np.int(seg_result[1]),
-            np.int(seg_result[2]),
-            np.int(seg_result[3])
+            int(seg_result[0]),
+            int(seg_result[1]),
+            int(seg_result[2]),
+            int(seg_result[3])
         ]
-        score = np.float(seg_result[4])
+        score = float(seg_result[4])
         category = seg_result[5]
 
         mask = np.array(seg_result[6], order='F', dtype='uint8')
-        mask = mask.astype(np.float)
+        mask = mask.astype(float)
 
         results_dict[OutputKeys.BOXES].append(box)
         results_dict[OutputKeys.MASKS].append(mask)
diff --git a/modelscope/models/cv/image_mvs_depth_estimation/colmap2mvsnet.py b/modelscope/models/cv/image_mvs_depth_estimation/colmap2mvsnet.py
index feda4430..37d92c13 100644
--- a/modelscope/models/cv/image_mvs_depth_estimation/colmap2mvsnet.py
+++ b/modelscope/models/cv/image_mvs_depth_estimation/colmap2mvsnet.py
@@ -382,7 +382,7 @@ def processing_single_scene(args):
                 points3d[p3d_id].xyz[0], points3d[p3d_id].xyz[1],
                 points3d[p3d_id].xyz[2], 1
             ])
-            zs.append(np.asscalar(transformed[2]))
+            zs.append(transformed[2].item())
         zs_sorted = sorted(zs)
         # relaxed depth range
         max_ratio = 0.1
diff --git a/modelscope/models/cv/image_mvs_depth_estimation/depth_filter.py b/modelscope/models/cv/image_mvs_depth_estimation/depth_filter.py
index 16cdedf4..4ef6275a 100644
--- a/modelscope/models/cv/image_mvs_depth_estimation/depth_filter.py
+++ b/modelscope/models/cv/image_mvs_depth_estimation/depth_filter.py
@@ -40,7 +40,7 @@ def read_mask(filename):
 
 # save a binary mask
 def save_mask(filename, mask):
-    assert mask.dtype == np.bool
+    assert mask.dtype == bool
     mask = mask.astype(np.uint8) * 255
     Image.fromarray(mask).save(filename)
 
diff --git a/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py b/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py
index 2b38ebad..455f29fb 100644
--- a/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py
+++ b/modelscope/models/cv/image_semantic_segmentation/semantic_seg_model.py
@@ -60,7 +60,7 @@ class SemanticSegmentation(TorchModel):
         ids = ids[legal_indices]
 
         segms = (semantic_result[None] == ids[:, None, None])
-        masks = [it.astype(np.int) for it in segms]
+        masks = [it.astype(int) for it in segms]
         labels_txt = np.array(self.CLASSES)[ids].tolist()
 
         results = {
diff --git a/modelscope/models/cv/image_skychange/ptsemseg/hrnet_backnone.py b/modelscope/models/cv/image_skychange/ptsemseg/hrnet_backnone.py
index 66429d67..8fcb6625 100644
--- a/modelscope/models/cv/image_skychange/ptsemseg/hrnet_backnone.py
+++ b/modelscope/models/cv/image_skychange/ptsemseg/hrnet_backnone.py
@@ -458,7 +458,7 @@ class HrnetBackBone(nn.Module):
         self.stage4, pre_stage_channels = self._make_stage(
             self.stage4_cfg, num_channels, multi_scale_output=True)
 
-        self.backbone_last_inp_channels = np.int(np.sum(pre_stage_channels))
+        self.backbone_last_inp_channels = int(np.sum(pre_stage_channels))
 
     def _make_transition_layer(self, num_channels_pre_layer,
                                num_channels_cur_layer):
diff --git a/modelscope/models/cv/image_skychange/ptsemseg/hrnet_super_and_ocr.py b/modelscope/models/cv/image_skychange/ptsemseg/hrnet_super_and_ocr.py
index 09768451..5dbef66e 100644
--- a/modelscope/models/cv/image_skychange/ptsemseg/hrnet_super_and_ocr.py
+++ b/modelscope/models/cv/image_skychange/ptsemseg/hrnet_super_and_ocr.py
@@ -259,7 +259,7 @@ class HrnetSuperAndOcr(HrnetBackBone):
             num_channels = [64, last_inp_channels]
             self.stage_super, super_stage_channels = self._make_stage(
                 self.super_dict, num_channels)
-            last_inp_channels = np.int(np.sum(super_stage_channels))
+            last_inp_channels = int(np.sum(super_stage_channels))
 
         if self.is_contain_aspp:
             aspp_param = kwargs['aspp']
@@ -372,7 +372,7 @@ class HrnetSuperAndOcr(HrnetBackBone):
             num_channels = [64, ocr_mid_channels]
             self.stage_super, super_stage_channels = self._make_stage(
                 self.super_dict, num_channels)
-            last_inp_channels = np.int(np.sum(super_stage_channels))
+            last_inp_channels = int(np.sum(super_stage_channels))
 
         self.cls_head = nn.Sequential(
             nn.Conv2d(
diff --git a/modelscope/models/cv/nerf_recon_acc/network/segmenter.py b/modelscope/models/cv/nerf_recon_acc/network/segmenter.py
index d71b9f16..e3d0ca8d 100644
--- a/modelscope/models/cv/nerf_recon_acc/network/segmenter.py
+++ b/modelscope/models/cv/nerf_recon_acc/network/segmenter.py
@@ -31,7 +31,7 @@ class ObjectSegmenter(object):
         elif img.shape[2] == 4:
             img = img[:, :, :3]
         img = img[:, :, ::-1]
-        img = img.astype(np.float)
+        img = img.astype(float)
         return img
 
     def run_mask(self, img):
diff --git a/modelscope/models/cv/object_detection_3d/depe/result_vis.py b/modelscope/models/cv/object_detection_3d/depe/result_vis.py
index d577ab68..d567654e 100644
--- a/modelscope/models/cv/object_detection_3d/depe/result_vis.py
+++ b/modelscope/models/cv/object_detection_3d/depe/result_vis.py
@@ -30,7 +30,7 @@ def depth2color(depth):
     if gray == 1:
         return tuple(colors[-1].tolist())
     num_rank = len(colors) - 1
-    rank = np.floor(gray * num_rank).astype(np.int)
+    rank = np.floor(gray * num_rank).astype(int)
     diff = (gray - rank / num_rank) * num_rank
     tmp = colors[rank + 1] - colors[rank]
     return tuple((colors[rank] + tmp * diff).tolist())
@@ -136,7 +136,7 @@ def plot_result(res_path,
     l2g = get_lidar2global(infos)
     corners_lidar = corners_global @ np.linalg.inv(l2g).T
     corners_lidar = corners_lidar[:, :3]
-    pred_flag = np.ones((corners_lidar.shape[0] // 8, ), dtype=np.bool)
+    pred_flag = np.ones((corners_lidar.shape[0] // 8, ), dtype=bool)
     scores = [
         pred_res[rid]['detection_score'] for rid in range(len(pred_res))
     ]
@@ -151,7 +151,7 @@ def plot_result(res_path,
             origin=(0.5, 0.5, 0.5)).corners.numpy().reshape(-1, 3)
         corners_lidar = np.concatenate([corners_lidar, corners_lidar_gt],
                                        axis=0)
-        gt_flag = np.ones((corners_lidar_gt.shape[0] // 8), dtype=np.bool)
+        gt_flag = np.ones((corners_lidar_gt.shape[0] // 8), dtype=bool)
         pred_flag = np.concatenate(
             [pred_flag, np.logical_not(gt_flag)], axis=0)
         scores = scores + [0 for _ in range(infos['gt_boxes'].shape[0])]
diff --git a/modelscope/models/cv/open_vocabulary_detection_vild/vild.py b/modelscope/models/cv/open_vocabulary_detection_vild/vild.py
index 999ec27a..2aea0593 100644
--- a/modelscope/models/cv/open_vocabulary_detection_vild/vild.py
+++ b/modelscope/models/cv/open_vocabulary_detection_vild/vild.py
@@ -176,8 +176,7 @@ class OpenVocabularyDetectionViLD(Model):
         # Filter out invalid rois (nmsed rois)
         valid_indices = np.where(
             np.logical_and(
-                np.isin(
-                    np.arange(len(roi_scores), dtype=np.int), nmsed_indices),
+                np.isin(np.arange(len(roi_scores), dtype=int), nmsed_indices),
                 np.logical_and(
                     np.logical_not(np.all(roi_boxes == 0., axis=-1)),
                     np.logical_and(roi_scores >= min_rpn_score_thresh,
diff --git a/modelscope/models/cv/panorama_depth_estimation/networks/layers.py b/modelscope/models/cv/panorama_depth_estimation/networks/layers.py
index 99e166aa..52fb3d39 100644
--- a/modelscope/models/cv/panorama_depth_estimation/networks/layers.py
+++ b/modelscope/models/cv/panorama_depth_estimation/networks/layers.py
@@ -72,7 +72,7 @@ class Cube2Equirec(nn.Module):
             self.equ_h, 0), 3 * self.equ_w // 8, 1)
 
         # Prepare ceil mask
-        mask = np.zeros((self.equ_h, self.equ_w // 4), np.bool)
+        mask = np.zeros((self.equ_h, self.equ_w // 4), bool)
         idx = np.linspace(-np.pi, np.pi, self.equ_w // 4) / 4
         idx = self.equ_h // 2 - np.round(
             np.arctan(np.cos(idx)) * self.equ_h / np.pi).astype(int)
diff --git a/modelscope/models/cv/video_depth_estimation/utils/depth.py b/modelscope/models/cv/video_depth_estimation/utils/depth.py
index e9f287e7..5fbf6aa6 100644
--- a/modelscope/models/cv/video_depth_estimation/utils/depth.py
+++ b/modelscope/models/cv/video_depth_estimation/utils/depth.py
@@ -29,7 +29,7 @@ def load_depth(file):
     elif file.endswith('png'):
        depth_png = np.array(load_image(file), dtype=int)
        assert (np.max(depth_png) > 255), 'Wrong .png depth file'
-        return depth_png.astype(np.float) / 256.
+        return depth_png.astype(float) / 256.
     else:
         raise NotImplementedError('Depth extension not supported.')
 
diff --git a/modelscope/models/cv/video_frame_interpolation/utils/scene_change_detection.py b/modelscope/models/cv/video_frame_interpolation/utils/scene_change_detection.py
index 4cbe60a7..379fe855 100644
--- a/modelscope/models/cv/video_frame_interpolation/utils/scene_change_detection.py
+++ b/modelscope/models/cv/video_frame_interpolation/utils/scene_change_detection.py
@@ -85,7 +85,7 @@ def do_scene_detect(F01_tensor, F10_tensor, img0_tensor, img1_tensor):
         img_diff = ori_img.float() - ref_img.float()
         img_diff = torch.abs(img_diff)
 
-        kernel = np.ones([8, 8], np.float) / 64
+        kernel = np.ones([8, 8], float) / 64
         kernel = torch.FloatTensor(kernel).to(device).unsqueeze(0).unsqueeze(0)
         diff = F.conv2d(img_diff, kernel, padding=4)
diff --git a/modelscope/models/cv/video_multi_object_tracking/tracker/matching.py b/modelscope/models/cv/video_multi_object_tracking/tracker/matching.py
index 45d2f5c0..e5c2e8a9 100644
--- a/modelscope/models/cv/video_multi_object_tracking/tracker/matching.py
+++ b/modelscope/models/cv/video_multi_object_tracking/tracker/matching.py
@@ -27,7 +27,7 @@ def linear_assignment(cost_matrix, thresh):
 
 
 def ious(atlbrs, btlbrs):
-    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
+    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=float)
     if ious.size == 0:
         return ious
 
@@ -60,13 +60,13 @@ def embedding_distance(tracks, detections, metric='cosine'):
         cost_matrix: np.ndarray
     """
 
-    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
+    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=float)
     if cost_matrix.size == 0:
         return cost_matrix
     det_features = np.asarray([track.curr_feat for track in detections],
-                              dtype=np.float)
+                              dtype=float)
     track_features = np.asarray([track.smooth_feat for track in tracks],
-                                dtype=np.float)
+                                dtype=float)
     cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))
     return cost_matrix
diff --git a/modelscope/models/cv/video_multi_object_tracking/tracker/multitracker.py b/modelscope/models/cv/video_multi_object_tracking/tracker/multitracker.py
index 1dc3297f..d38477b7 100644
--- a/modelscope/models/cv/video_multi_object_tracking/tracker/multitracker.py
+++ b/modelscope/models/cv/video_multi_object_tracking/tracker/multitracker.py
@@ -28,7 +28,7 @@ class STrack(BaseTrack):
 
     def __init__(self, tlwh, score, temp_feat, buffer_size=30):
         # wait activate
-        self._tlwh = np.asarray(tlwh, dtype=np.float)
+        self._tlwh = np.asarray(tlwh, dtype=float)
         self.kalman_filter = None
         self.mean, self.covariance = None, None
         self.is_activated = False
diff --git a/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py b/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
index 813f750e..743c049a 100644
--- a/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
+++ b/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py
@@ -128,13 +128,13 @@ class VideoCLIPForMultiModalEmbedding(TorchModel):
                          local_transform,
                          s=None,
                          e=None):
-        video_mask = np.zeros(self.max_frames, dtype=np.long)
+        video_mask = np.zeros(self.max_frames, dtype=int)
         max_video_length = 0
 
         # T x 3 x H x W
         video = np.zeros((self.max_frames, 3, rawVideoExtractor.size,
                           rawVideoExtractor.size),
-                         dtype=np.float)
+                         dtype=float)
 
         if s is None:
             start_time, end_time = None, None
diff --git a/modelscope/models/nlp/mglm/blocklm_utils.py b/modelscope/models/nlp/mglm/blocklm_utils.py
index b05cd2c2..e75aea92 100644
--- a/modelscope/models/nlp/mglm/blocklm_utils.py
+++ b/modelscope/models/nlp/mglm/blocklm_utils.py
@@ -212,10 +212,10 @@ class ConstructBlockStrategy:
                          block_spans,
                          rng,
                          task='bert'):
-        position_ids = np.arange(len(tokens), dtype=np.long)
+        position_ids = np.arange(len(tokens), dtype=int)
         targets = copy.deepcopy(tokens)
         mask_id = self.tokenizer.get_command('MASK').Id
-        mlm_masks = np.zeros(len(tokens), dtype=np.long)
+        mlm_masks = np.zeros(len(tokens), dtype=int)
         for start, end in block_spans:
             for idx in range(start, end):
                 tokens[idx] = mask_id
@@ -231,7 +231,7 @@ class ConstructBlockStrategy:
                            rng,
                            task='bert'):
         text_length = len(tokens)
-        position_ids = np.ones(len(tokens), dtype=np.long)
+        position_ids = np.ones(len(tokens), dtype=int)
         for start, end in block_spans:
             position_ids[start + 1:end] = 0
         position_ids = np.cumsum(position_ids) - 1
@@ -270,7 +270,7 @@ class ConstructBlockStrategy:
                                                    (end - start + 1))
             if self.block_position_encoding:
                 target_block_position_ids.append(
-                    np.arange(1, end - start + 2, dtype=np.long))
+                    np.arange(1, end - start + 2, dtype=int))
             else:
                 target_block_position_ids.append([1] * (end - start + 1))
         block_spans.sort(key=lambda x: x[0])
@@ -307,7 +307,7 @@ class ConstructBlockStrategy:
                 target_tokens = target_tokens + [
                     self.tokenizer.get_command('eop').Id
                 ]
-            loss_masks = np.ones(len(target_tokens), dtype=np.long)
+            loss_masks = np.ones(len(target_tokens), dtype=int)
             return source_tokens, target_tokens, loss_masks
         else:
             tokens = np.concatenate(source_tokens + target_tokens)
@@ -326,12 +326,12 @@ class ConstructBlockStrategy:
                 for pos in mask_pos:
                     tokens[pos] = self.tokenizer.get_command('dBLOCK').Id
             targets = np.concatenate(source_tokens + targets)
-            loss_masks = np.ones(len(tokens), dtype=np.long)
+            loss_masks = np.ones(len(tokens), dtype=int)
             loss_masks[:source_length] = 0
             position_ids = np.concatenate(source_position_ids
                                           + target_position_ids)
             block_position_ids = np.concatenate(
-                [np.zeros(source_length, dtype=np.long)]
+                [np.zeros(source_length, dtype=int)]
                 + target_block_position_ids)
             position_ids = np.stack([position_ids, block_position_ids], axis=0)
             if attention_mask is not None:
@@ -539,22 +539,21 @@ class ConstructBlockStrategy:
                     (source_tokens, [self.generation_mask], target_tokens))
                 loss_masks = np.concatenate(
                     (np.zeros(len(source_tokens) + 1,
-                              dtype=np.long), target_masks))
+                              dtype=int), target_masks))
                 token_batch.append(tokens)
                 target_batch.append(targets)
                 loss_mask_batch.append(loss_masks)
                 position_ids = np.arange(
-                    len(source_tokens) + len(target_tokens) + 1,
-                    dtype=np.long)
+                    len(source_tokens) + len(target_tokens) + 1, dtype=int)
                 position_ids[len(source_tokens) + 1:] = len(source_tokens)
                 if self.block_position_encoding:
                     block_position_ids = np.concatenate(
-                        (np.zeros(len(source_tokens), dtype=np.long),
-                         np.arange(len(target_tokens) + 1, dtype=np.long)))
+                        (np.zeros(len(source_tokens), dtype=int),
+                         np.arange(len(target_tokens) + 1, dtype=int)))
                 else:
                     block_position_ids = np.concatenate(
-                        (np.zeros(len(source_tokens) + 1, dtype=np.long),
-                         np.ones(len(target_tokens) + 1, dtype=np.long)))
+                        (np.zeros(len(source_tokens) + 1, dtype=int),
+                         np.ones(len(target_tokens) + 1, dtype=int)))
                 position_id_batch.append(
                     np.stack([position_ids, block_position_ids], axis=0))
             else:
@@ -597,27 +596,25 @@ class ConstructBlockStrategy:
         max_length = max(seq_lengths)
         token_batch = [
             np.concatenate(
-                (tokens, np.zeros(max_length - len(tokens),
-                                  dtype=np.long)))
+                (tokens, np.zeros(max_length - len(tokens), dtype=int)))
             for tokens in token_batch
         ]
         target_batch = [
             np.concatenate(
-                (targets,
-                 np.zeros(max_length - len(targets), dtype=np.long)))
+                (targets, np.zeros(max_length - len(targets), dtype=int)))
             for targets in target_batch
         ]
         loss_mask_batch = [
             np.concatenate(
                 (loss_masks,
-                 np.zeros(max_length - len(loss_masks), dtype=np.long)))
+                 np.zeros(max_length - len(loss_masks), dtype=int)))
             for loss_masks in loss_mask_batch
         ]
         position_id_batch = [
-            np.concatenate((position_ids,
-                            np.zeros(
-                                (2, max_length - position_ids.shape[1]),
-                                dtype=np.long)),
-                           axis=1) for position_ids in position_id_batch
+            np.concatenate(
+                (position_ids,
+                 np.zeros(
+                     (2, max_length - position_ids.shape[1]), dtype=int)),
+                axis=1) for position_ids in position_id_batch
         ]
         return token_batch, target_batch, loss_mask_batch, position_id_batch
diff --git a/modelscope/models/nlp/mglm/data_utils/datasets.py b/modelscope/models/nlp/mglm/data_utils/datasets.py
index 39ffaea3..37bfbcc2 100644
--- a/modelscope/models/nlp/mglm/data_utils/datasets.py
+++ b/modelscope/models/nlp/mglm/data_utils/datasets.py
@@ -583,8 +583,8 @@ class XLDataset(data.Dataset):
     def getidx(self, idx):
         tokens, targets, loss_masks = [], [], []
         attention_mask = np.concatenate(
-            (np.zeros((self.max_seq_len, self.mem_len), dtype=np.long),
-             np.ones((self.max_seq_len, self.max_seq_len), dtype=np.long)),
+            (np.zeros((self.max_seq_len, self.mem_len), dtype=int),
+             np.ones((self.max_seq_len, self.max_seq_len), dtype=int)),
             axis=1)
         sample_idx = bisect_right(self.indices, idx * self.max_seq_len)
         last_end = 0 if sample_idx == 0 else self.indices[sample_idx - 1]
diff --git a/modelscope/models/nlp/mglm/test/test_block.py b/modelscope/models/nlp/mglm/test/test_block.py
index ed4225da..eb630835 100644
--- a/modelscope/models/nlp/mglm/test/test_block.py
+++ b/modelscope/models/nlp/mglm/test/test_block.py
@@ -28,7 +28,7 @@ def main():
     counts = np.array([0] * 10)
     for _ in range(10000):
         spans = strategy.sample_span_in_document(
-            np.array([1, 2, 3, 0, 4, 5, 6, 7, 9, 0], dtype=np.long), [1, 1],
+            np.array([1, 2, 3, 0, 4, 5, 6, 7, 9, 0], dtype=int), [1, 1],
             random.Random())
         for start, end in spans:
             counts[start:end] += 1
diff --git a/modelscope/models/nlp/mglm/test/test_rel_shift.py b/modelscope/models/nlp/mglm/test/test_rel_shift.py
index 00cbb9fe..ad68b15e 100644
--- a/modelscope/models/nlp/mglm/test/test_rel_shift.py
+++ b/modelscope/models/nlp/mglm/test/test_rel_shift.py
@@ -17,7 +17,7 @@ def main():
         num_iters=300000,
         decay_style='cosine',
         decay_ratio=0.1)
-    steps = np.arange(0, 400000, 10, dtype=np.long)
+    steps = np.arange(0, 400000, 10, dtype=int)
     rates = []
     for step in steps:
         lr_scheduler.num_iters = step
diff --git a/modelscope/models/science/unifold/data/msa_pairing.py b/modelscope/models/science/unifold/data/msa_pairing.py
index cc65962c..77c4e9a6 100644
--- a/modelscope/models/science/unifold/data/msa_pairing.py
+++ b/modelscope/models/science/unifold/data/msa_pairing.py
@@ -115,7 +115,7 @@ def pad_features(feature: np.ndarray, feature_name: str) -> np.ndarray:
     Returns:
       The feature with an additional padding row.
     """
-    assert feature.dtype != np.dtype(np.string_)
+    assert feature.dtype != np.dtype(np.str_)
     if feature_name in (
             'msa_all_seq',
             'msa_mask_all_seq',
diff --git a/modelscope/models/science/unifold/msa/templates.py b/modelscope/models/science/unifold/msa/templates.py
index d1ff8cf1..f2d3d79c 100644
--- a/modelscope/models/science/unifold/msa/templates.py
+++ b/modelscope/models/science/unifold/msa/templates.py
@@ -1100,9 +1100,9 @@ class HmmsearchHitFeaturizer(TemplateHitFeaturizer):
                 np.zeros((1, num_res, residue_constants.atom_type_num, 3),
                          np.float32),
                 'template_domain_names':
-                np.array([''.encode()], dtype=np.object),
+                np.array([''.encode()], dtype=np.object_),
                 'template_sequence':
-                np.array([''.encode()], dtype=np.object),
+                np.array([''.encode()], dtype=np.object_),
                 'template_sum_probs':
                 np.array([0], dtype=np.float32),
             }
diff --git a/modelscope/pipelines/cv/face_reconstruction_pipeline.py b/modelscope/pipelines/cv/face_reconstruction_pipeline.py
index f8240fc0..b9a8e320 100644
--- a/modelscope/pipelines/cv/face_reconstruction_pipeline.py
+++ b/modelscope/pipelines/cv/face_reconstruction_pipeline.py
@@ -134,7 +134,7 @@ class FaceReconstructionPipeline(Pipeline):
         img = LoadImage.convert_to_ndarray(input)
         if len(img.shape) == 2:
             img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-        img = img.astype(np.float)
+        img = img.astype(float)
         result = {'img': img}
         return result
 
diff --git a/modelscope/pipelines/cv/image_matting_pipeline.py b/modelscope/pipelines/cv/image_matting_pipeline.py
index 5f5d1d56..bee655c5 100644
--- a/modelscope/pipelines/cv/image_matting_pipeline.py
+++ b/modelscope/pipelines/cv/image_matting_pipeline.py
@@ -53,7 +53,7 @@ class ImageMattingPipeline(Pipeline):
 
     def preprocess(self, input: Input) -> Dict[str, Any]:
         img = LoadImage.convert_to_ndarray(input)
-        img = img.astype(np.float)
+        img = img.astype(float)
         result = {'img': img}
         return result
 
diff --git a/modelscope/pipelines/cv/image_style_transfer_pipeline.py b/modelscope/pipelines/cv/image_style_transfer_pipeline.py
index e5fd0d48..49a0bff0 100644
--- a/modelscope/pipelines/cv/image_style_transfer_pipeline.py
+++ b/modelscope/pipelines/cv/image_style_transfer_pipeline.py
@@ -73,12 +73,12 @@ class ImageStyleTransferPipeline(Pipeline):
         content = LoadImage.convert_to_ndarray(content)
         if len(content.shape) == 2:
             content = cv2.cvtColor(content, cv2.COLOR_GRAY2BGR)
-        content_img = content.astype(np.float)
+        content_img = content.astype(float)
 
         style_img = LoadImage.convert_to_ndarray(style)
         if len(style_img.shape) == 2:
             style_img = cv2.cvtColor(style_img, cv2.COLOR_GRAY2BGR)
-        style_img = style_img.astype(np.float)
+        style_img = style_img.astype(float)
 
         result = {'content': content_img, 'style': style_img}
         return result
diff --git a/modelscope/pipelines/cv/skin_retouching_pipeline.py b/modelscope/pipelines/cv/skin_retouching_pipeline.py
index b2b5f4ca..da9b912f 100644
--- a/modelscope/pipelines/cv/skin_retouching_pipeline.py
+++ b/modelscope/pipelines/cv/skin_retouching_pipeline.py
@@ -105,7 +105,7 @@ class SkinRetouchingPipeline(Pipeline):
         img = LoadImage.convert_to_ndarray(input)
         if len(img.shape) == 2:
             img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-        img = img.astype(np.float)
+        img = img.astype(float)
 
         result = {'img': img}
         return result
diff --git a/modelscope/pipelines/cv/tbs_detection_pipeline.py b/modelscope/pipelines/cv/tbs_detection_pipeline.py
index 58831846..8bbac9c8 100644
--- a/modelscope/pipelines/cv/tbs_detection_pipeline.py
+++ b/modelscope/pipelines/cv/tbs_detection_pipeline.py
@@ -116,7 +116,7 @@ class TBSDetectionPipeline(Pipeline):
             - **labels** (`List[str]`, optional) -- The boxes's class_names of detected object in image.
         """
         img = LoadImage.convert_to_ndarray(input)
-        img = img.astype(np.float)
+        img = img.astype(float)
         result = {'img': img, 'img_path': input}
         return result
 
diff --git a/modelscope/utils/regress_test_utils.py b/modelscope/utils/regress_test_utils.py
index 0f10c1ce..e03b3a7c 100644
--- a/modelscope/utils/regress_test_utils.py
+++ b/modelscope/utils/regress_test_utils.py
@@ -483,9 +483,9 @@ def numpify_tensor_nested(tensors, reduction=None, clip_value=10000):
             t = np.where(t > clip_value, clip_value, t)
             t = np.where(t < -clip_value, -clip_value, t)
         if reduction == 'sum':
-            return t.sum(dtype=np.float)
+            return t.sum(dtype=float)
         elif reduction == 'mean':
-            return t.mean(dtype=np.float)
+            return t.mean(dtype=float)
         return t
     return tensors
diff --git a/modelscope/utils/test_utils.py b/modelscope/utils/test_utils.py
index b4ce7299..03d293ec 100644
--- a/modelscope/utils/test_utils.py
+++ b/modelscope/utils/test_utils.py
@@ -150,7 +150,7 @@ def compare_arguments_nested(print_content,
 
     if arg1 is None:
         return True
-    elif isinstance(arg1, (int, str, bool, np.bool, np.integer, np.str)):
+    elif isinstance(arg1, (int, str, bool, np.bool_, np.integer, np.str_)):
         if arg1 != arg2:
             if print_content is not None:
                 print(f'{print_content}, arg1:{arg1}, arg2:{arg2}')
@@ -201,10 +201,8 @@ def compare_arguments_nested(print_content,
                 return False
         return True
     elif isinstance(arg1, np.ndarray):
-        arg1 = np.where(np.equal(arg1, None), np.NaN,
-                        arg1).astype(dtype=np.float)
-        arg2 = np.where(np.equal(arg2, None), np.NaN,
-                        arg2).astype(dtype=np.float)
+        arg1 = np.where(np.equal(arg1, None), np.NaN, arg1).astype(dtype=float)
+        arg2 = np.where(np.equal(arg2, None), np.NaN, arg2).astype(dtype=float)
         if not all(
                 np.isclose(arg1, arg2, rtol=rtol, atol=atol,
                            equal_nan=True).flatten()):
diff --git a/requirements/framework.txt b/requirements/framework.txt
index e15e95eb..144cd31e 100644
--- a/requirements/framework.txt
+++ b/requirements/framework.txt
@@ -7,6 +7,8 @@ gast>=0.2.2
 mmdet<=2.28.2
 numpy<1.24.0
 oss2
+# for datasets compatibility
+pandas<=1.5.3
 Pillow>=6.2.0
 # pyarrow 9.0.0 introduced event_loop core dump
 pyarrow>=6.0.0,!=9.0.0
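For context, the single pattern applied throughout this patch: NumPy 1.20 deprecated the `np.int`, `np.float`, `np.bool`, `np.object`, `np.str`, and `np.long` aliases and NumPy 1.24 removed them, while `np.asscalar` was removed in NumPy 1.23 in favour of `ndarray.item()`. Below is a minimal sketch (illustration only, not part of the patch) of each replacement; the new spellings behave identically under the pinned `numpy<1.24.0` while staying forward compatible:

```python
import numpy as np

x = np.array([1.5, 2.5])

# Python builtins replace the removed scalar aliases.
a = x.astype(float)                     # was: x.astype(np.float)
b = np.zeros(3, dtype=int)              # was: dtype=np.int or dtype=np.long
c = np.ones(3, dtype=bool)              # was: dtype=np.bool

# NumPy scalar types keep the trailing underscore.
d = np.array(['id_0'], dtype=np.str_)   # was: dtype=np.str
e = np.array([b''], dtype=np.object_)   # was: dtype=np.object

# ndarray.item() replaces the removed np.asscalar().
f = x[0].item()                         # was: np.asscalar(x[0])
assert isinstance(f, float)
```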