Mirror of https://github.com/modelscope/modelscope.git (synced 2025-12-24 20:19:22 +01:00)
[to #49147498]feat: support python3.8
@@ -39,7 +39,7 @@ class ConvSTFT(nn.Module):
        super(ConvSTFT, self).__init__()

        if fft_len is None:
-            self.fft_len = np.int(2**np.ceil(np.log2(win_len)))
+            self.fft_len = int(2**np.ceil(np.log2(win_len)))
        else:
            self.fft_len = fft_len

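Context for this and the following hunks: `np.int` and `np.float` were deprecated aliases for the Python builtins (deprecated in NumPy 1.20, removed in 1.24), so replacing them with `int`/`float` is behavior-preserving. A minimal sketch of the FFT-length computation above, with an illustrative window length:

```python
import numpy as np

# np.int was just an alias for the builtin int, so the new code computes
# the same next-power-of-two FFT length as the old code.
win_len = 400  # illustrative value
fft_len = int(2 ** np.ceil(np.log2(win_len)))
assert fft_len == 512
```
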
@@ -78,7 +78,7 @@ class ConviSTFT(nn.Module):
                 fix=True):
        super(ConviSTFT, self).__init__()
        if fft_len is None:
-            self.fft_len = np.int(2**np.ceil(np.log2(win_len)))
+            self.fft_len = int(2**np.ceil(np.log2(win_len)))
        else:
            self.fft_len = fft_len
        kernel, window = init_kernels(

@@ -72,7 +72,7 @@ class PoseHighResolutionNetV2(TorchModel):
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        """final four layers"""
-        last_inp_channels = np.int(np.sum(pre_stage_channels))
+        last_inp_channels = int(np.sum(pre_stage_channels))
        self.final_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=last_inp_channels,

@@ -81,7 +81,7 @@ class FaceLandmark:
        bbox[2] = center[0] + one_edge // 2
        bbox[3] = center[1] + one_edge // 2

-        bbox = bbox.astype(np.int)
+        bbox = bbox.astype(int)
        crop_image = bimg[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
        h, w, _ = crop_image.shape
        crop_image = cv2.resize(

@@ -356,7 +356,7 @@ class HighResolutionNet(nn.Module):
            num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
-        last_inp_channels = np.int(np.sum(pre_stage_channels)) + 256
+        last_inp_channels = int(np.sum(pre_stage_channels)) + 256
        self.redc_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=last_inp_channels,

@@ -82,7 +82,7 @@ class FaceLandmark:
        bbox[2] = center[0] + one_edge // 2
        bbox[3] = center[1] + one_edge // 2

-        bbox = bbox.astype(np.int)
+        bbox = bbox.astype(int)
        crop_image = bimg[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
        h, w, _ = crop_image.shape
        crop_image = cv2.resize(crop_image,

@@ -31,7 +31,7 @@ class human_segmenter(object):
            img = np.dstack((img, img, img))
        elif img.shape[2] == 4:
            img = img[:, :, :3]
-        img = img.astype(np.float)
+        img = img.astype(float)
        return img

    def run(self, img):

@@ -69,8 +69,8 @@ def eval_grid(coords,
              num_samples=512 * 512 * 512):
    resolution = coords.shape[1:4]
    sdf = np.zeros(resolution)
-    dirty = np.ones(resolution, dtype=np.bool)
-    grid_mask = np.zeros(resolution, dtype=np.bool)
+    dirty = np.ones(resolution, dtype=bool)
+    grid_mask = np.zeros(resolution, dtype=bool)
    reso = resolution[0] // init_resolution

    while reso > 0:

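Same story for `np.bool`: it aliased the builtin `bool`, and `dtype=bool` still resolves to NumPy's `bool_` dtype, so the occupancy grids are unchanged. A quick sketch with a small, made-up resolution:

```python
import numpy as np

resolution = (8, 8, 8)  # made-up grid size
dirty = np.ones(resolution, dtype=bool)
grid_mask = np.zeros(resolution, dtype=bool)
# dtype=bool resolves to the same array dtype that dtype=np.bool produced.
assert dirty.dtype == np.bool_ and grid_mask.dtype == np.bool_
```
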
@@ -163,7 +163,7 @@ def load_filtered_voc_instances(name: str, root: str, dirname: str, split: str,
                os.path.join(split_dir,
                             'box_{}shot_{}_train.txt'.format(shot,
                                                              cls))) as f:
-            fileids_ = np.loadtxt(f, dtype=np.str).tolist()
+            fileids_ = np.loadtxt(f, dtype=np.str_).tolist()
            if isinstance(fileids_, str):
                fileids_ = [fileids_]
            fileids_ = [

@@ -219,7 +219,7 @@ def load_filtered_voc_instances(name: str, root: str, dirname: str, split: str,
        with PathManager.open(
                os.path.join(root, dirname, 'ImageSets', 'Main',
                             split + '.txt')) as f:
-            fileids = np.loadtxt(f, dtype=np.str)
+            fileids = np.loadtxt(f, dtype=np.str_)

        for fileid in fileids:
            anno_file = os.path.join(root, dirname, 'Annotations',

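Unlike the builtin swaps, `np.str` is replaced here with `np.str_`, NumPy's own unicode scalar type, which remains a valid `loadtxt` dtype. A self-contained check with made-up file ids:

```python
import io
import numpy as np

# np.str aliased the builtin str and is gone in NumPy 1.24; np.str_ is
# NumPy's unicode scalar type and still works as a loadtxt dtype.
f = io.StringIO('2007_000027\n2007_000032\n')
fileids = np.loadtxt(f, dtype=np.str_).tolist()
assert fileids == ['2007_000027', '2007_000032']
```
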
@@ -108,16 +108,16 @@ def get_img_ins_seg_result(img_seg_result=None,
    for seg_result in img_seg_result:

        box = [
-            np.int(seg_result[0]),
-            np.int(seg_result[1]),
-            np.int(seg_result[2]),
-            np.int(seg_result[3])
+            int(seg_result[0]),
+            int(seg_result[1]),
+            int(seg_result[2]),
+            int(seg_result[3])
        ]
-        score = np.float(seg_result[4])
+        score = float(seg_result[4])
        category = seg_result[5]

        mask = np.array(seg_result[6], order='F', dtype='uint8')
-        mask = mask.astype(np.float)
+        mask = mask.astype(float)

        results_dict[OutputKeys.BOXES].append(box)
        results_dict[OutputKeys.MASKS].append(mask)

@@ -382,7 +382,7 @@ def processing_single_scene(args):
                points3d[p3d_id].xyz[0], points3d[p3d_id].xyz[1],
                points3d[p3d_id].xyz[2], 1
            ])
-            zs.append(np.asscalar(transformed[2]))
+            zs.append(transformed[2].item())
        zs_sorted = sorted(zs)
        # relaxed depth range
        max_ratio = 0.1

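`np.asscalar` was removed in NumPy 1.23; `ndarray.item()` is the documented replacement and returns a plain Python scalar. A sketch with a made-up homogeneous coordinate:

```python
import numpy as np

transformed = np.array([0.1, -0.4, 2.5, 1.0])  # made-up values
z = transformed[2].item()  # replaces np.asscalar(transformed[2])
assert isinstance(z, float) and z == 2.5
```
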
@@ -40,7 +40,7 @@ def read_mask(filename):

# save a binary mask
def save_mask(filename, mask):
-    assert mask.dtype == np.bool
+    assert mask.dtype == bool
    mask = mask.astype(np.uint8) * 255
    Image.fromarray(mask).save(filename)

@@ -60,7 +60,7 @@ class SemanticSegmentation(TorchModel):
        ids = ids[legal_indices]

        segms = (semantic_result[None] == ids[:, None, None])
-        masks = [it.astype(np.int) for it in segms]
+        masks = [it.astype(int) for it in segms]
        labels_txt = np.array(self.CLASSES)[ids].tolist()

        results = {

@@ -458,7 +458,7 @@ class HrnetBackBone(nn.Module):
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)

-        self.backbone_last_inp_channels = np.int(np.sum(pre_stage_channels))
+        self.backbone_last_inp_channels = int(np.sum(pre_stage_channels))

    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):

@@ -259,7 +259,7 @@ class HrnetSuperAndOcr(HrnetBackBone):
            num_channels = [64, last_inp_channels]
            self.stage_super, super_stage_channels = self._make_stage(
                self.super_dict, num_channels)
-            last_inp_channels = np.int(np.sum(super_stage_channels))
+            last_inp_channels = int(np.sum(super_stage_channels))

        if self.is_contain_aspp:
            aspp_param = kwargs['aspp']

@@ -372,7 +372,7 @@ class HrnetSuperAndOcr(HrnetBackBone):
            num_channels = [64, ocr_mid_channels]
            self.stage_super, super_stage_channels = self._make_stage(
                self.super_dict, num_channels)
-            last_inp_channels = np.int(np.sum(super_stage_channels))
+            last_inp_channels = int(np.sum(super_stage_channels))

        self.cls_head = nn.Sequential(
            nn.Conv2d(

@@ -31,7 +31,7 @@ class ObjectSegmenter(object):
        elif img.shape[2] == 4:
            img = img[:, :, :3]
        img = img[:, :, ::-1]
-        img = img.astype(np.float)
+        img = img.astype(float)
        return img

    def run_mask(self, img):

@@ -30,7 +30,7 @@ def depth2color(depth):
    if gray == 1:
        return tuple(colors[-1].tolist())
    num_rank = len(colors) - 1
-    rank = np.floor(gray * num_rank).astype(np.int)
+    rank = np.floor(gray * num_rank).astype(int)
    diff = (gray - rank / num_rank) * num_rank
    tmp = colors[rank + 1] - colors[rank]
    return tuple((colors[rank] + tmp * diff).tolist())

@@ -136,7 +136,7 @@ def plot_result(res_path,
    l2g = get_lidar2global(infos)
    corners_lidar = corners_global @ np.linalg.inv(l2g).T
    corners_lidar = corners_lidar[:, :3]
-    pred_flag = np.ones((corners_lidar.shape[0] // 8, ), dtype=np.bool)
+    pred_flag = np.ones((corners_lidar.shape[0] // 8, ), dtype=bool)
    scores = [
        pred_res[rid]['detection_score'] for rid in range(len(pred_res))
    ]

@@ -151,7 +151,7 @@ def plot_result(res_path,
            origin=(0.5, 0.5, 0.5)).corners.numpy().reshape(-1, 3)
        corners_lidar = np.concatenate([corners_lidar, corners_lidar_gt],
                                       axis=0)
-        gt_flag = np.ones((corners_lidar_gt.shape[0] // 8), dtype=np.bool)
+        gt_flag = np.ones((corners_lidar_gt.shape[0] // 8), dtype=bool)
        pred_flag = np.concatenate(
            [pred_flag, np.logical_not(gt_flag)], axis=0)
        scores = scores + [0 for _ in range(infos['gt_boxes'].shape[0])]

@@ -176,8 +176,7 @@ class OpenVocabularyDetectionViLD(Model):
        # Filter out invalid rois (nmsed rois)
        valid_indices = np.where(
            np.logical_and(
-                np.isin(
-                    np.arange(len(roi_scores), dtype=np.int), nmsed_indices),
+                np.isin(np.arange(len(roi_scores), dtype=int), nmsed_indices),
                np.logical_and(
                    np.logical_not(np.all(roi_boxes == 0., axis=-1)),
                    np.logical_and(roi_scores >= min_rpn_score_thresh,

@@ -72,7 +72,7 @@ class Cube2Equirec(nn.Module):
            self.equ_h, 0), 3 * self.equ_w // 8, 1)

        # Prepare ceil mask
-        mask = np.zeros((self.equ_h, self.equ_w // 4), np.bool)
+        mask = np.zeros((self.equ_h, self.equ_w // 4), bool)
        idx = np.linspace(-np.pi, np.pi, self.equ_w // 4) / 4
        idx = self.equ_h // 2 - np.round(
            np.arctan(np.cos(idx)) * self.equ_h / np.pi).astype(int)

@@ -29,7 +29,7 @@ def load_depth(file):
    elif file.endswith('png'):
        depth_png = np.array(load_image(file), dtype=int)
        assert (np.max(depth_png) > 255), 'Wrong .png depth file'
-        return depth_png.astype(np.float) / 256.
+        return depth_png.astype(float) / 256.
    else:
        raise NotImplementedError('Depth extension not supported.')

@@ -85,7 +85,7 @@ def do_scene_detect(F01_tensor, F10_tensor, img0_tensor, img1_tensor):
    img_diff = ori_img.float() - ref_img.float()
    img_diff = torch.abs(img_diff)

-    kernel = np.ones([8, 8], np.float) / 64
+    kernel = np.ones([8, 8], float) / 64
    kernel = torch.FloatTensor(kernel).to(device).unsqueeze(0).unsqueeze(0)
    diff = F.conv2d(img_diff, kernel, padding=4)

@@ -27,7 +27,7 @@ def linear_assignment(cost_matrix, thresh):


def ious(atlbrs, btlbrs):
-    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
+    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=float)
    if ious.size == 0:
        return ious

@@ -60,13 +60,13 @@ def embedding_distance(tracks, detections, metric='cosine'):
        cost_matrix: np.ndarray
    """

-    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
+    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=float)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections],
-                              dtype=np.float)
+                              dtype=float)
    track_features = np.asarray([track.smooth_feat for track in tracks],
-                                dtype=np.float)
+                                dtype=float)
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))
    return cost_matrix

@@ -28,7 +28,7 @@ class STrack(BaseTrack):
    def __init__(self, tlwh, score, temp_feat, buffer_size=30):

        # wait activate
-        self._tlwh = np.asarray(tlwh, dtype=np.float)
+        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

@@ -128,13 +128,13 @@ class VideoCLIPForMultiModalEmbedding(TorchModel):
                      local_transform,
                      s=None,
                      e=None):
-        video_mask = np.zeros(self.max_frames, dtype=np.long)
+        video_mask = np.zeros(self.max_frames, dtype=int)
        max_video_length = 0

        # T x 3 x H x W
        video = np.zeros((self.max_frames, 3, rawVideoExtractor.size,
                          rawVideoExtractor.size),
-                         dtype=np.float)
+                         dtype=float)

        if s is None:
            start_time, end_time = None, None

@@ -212,10 +212,10 @@ class ConstructBlockStrategy:
                         block_spans,
                         rng,
                         task='bert'):
-        position_ids = np.arange(len(tokens), dtype=np.long)
+        position_ids = np.arange(len(tokens), dtype=int)
        targets = copy.deepcopy(tokens)
        mask_id = self.tokenizer.get_command('MASK').Id
-        mlm_masks = np.zeros(len(tokens), dtype=np.long)
+        mlm_masks = np.zeros(len(tokens), dtype=int)
        for start, end in block_spans:
            for idx in range(start, end):
                tokens[idx] = mask_id

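`np.long` aliased Python's `int` (a leftover from Python 2's `long`), not `int64`, so `dtype=int` keeps the platform-default integer width the old code already had. A sketch with illustrative token ids:

```python
import numpy as np

tokens = [101, 7592, 2088, 102]  # made-up token ids
position_ids = np.arange(len(tokens), dtype=int)
mlm_masks = np.zeros(len(tokens), dtype=int)
# dtype=int selects the same platform-default integer that np.long did.
assert position_ids.dtype == np.dtype(int)
```
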
@@ -231,7 +231,7 @@ class ConstructBlockStrategy:
                         rng,
                         task='bert'):
        text_length = len(tokens)
-        position_ids = np.ones(len(tokens), dtype=np.long)
+        position_ids = np.ones(len(tokens), dtype=int)
        for start, end in block_spans:
            position_ids[start + 1:end] = 0
        position_ids = np.cumsum(position_ids) - 1

@@ -270,7 +270,7 @@ class ConstructBlockStrategy:
                                        (end - start + 1))
            if self.block_position_encoding:
                target_block_position_ids.append(
-                    np.arange(1, end - start + 2, dtype=np.long))
+                    np.arange(1, end - start + 2, dtype=int))
            else:
                target_block_position_ids.append([1] * (end - start + 1))
        block_spans.sort(key=lambda x: x[0])

@@ -307,7 +307,7 @@ class ConstructBlockStrategy:
            target_tokens = target_tokens + [
                self.tokenizer.get_command('eop').Id
            ]
-            loss_masks = np.ones(len(target_tokens), dtype=np.long)
+            loss_masks = np.ones(len(target_tokens), dtype=int)
            return source_tokens, target_tokens, loss_masks
        else:
            tokens = np.concatenate(source_tokens + target_tokens)

@@ -326,12 +326,12 @@ class ConstructBlockStrategy:
            for pos in mask_pos:
                tokens[pos] = self.tokenizer.get_command('dBLOCK').Id
            targets = np.concatenate(source_tokens + targets)
-            loss_masks = np.ones(len(tokens), dtype=np.long)
+            loss_masks = np.ones(len(tokens), dtype=int)
            loss_masks[:source_length] = 0
            position_ids = np.concatenate(source_position_ids
                                          + target_position_ids)
            block_position_ids = np.concatenate(
-                [np.zeros(source_length, dtype=np.long)]
+                [np.zeros(source_length, dtype=int)]
                + target_block_position_ids)
            position_ids = np.stack([position_ids, block_position_ids], axis=0)
            if attention_mask is not None:

@@ -539,22 +539,21 @@ class ConstructBlockStrategy:
                    (source_tokens, [self.generation_mask], target_tokens))
                loss_masks = np.concatenate(
                    (np.zeros(len(source_tokens) + 1,
-                              dtype=np.long), target_masks))
+                              dtype=int), target_masks))
                token_batch.append(tokens)
                target_batch.append(targets)
                loss_mask_batch.append(loss_masks)
                position_ids = np.arange(
-                    len(source_tokens) + len(target_tokens) + 1,
-                    dtype=np.long)
+                    len(source_tokens) + len(target_tokens) + 1, dtype=int)
                position_ids[len(source_tokens) + 1:] = len(source_tokens)
                if self.block_position_encoding:
                    block_position_ids = np.concatenate(
-                        (np.zeros(len(source_tokens), dtype=np.long),
-                         np.arange(len(target_tokens) + 1, dtype=np.long)))
+                        (np.zeros(len(source_tokens), dtype=int),
+                         np.arange(len(target_tokens) + 1, dtype=int)))
                else:
                    block_position_ids = np.concatenate(
-                        (np.zeros(len(source_tokens) + 1, dtype=np.long),
-                         np.ones(len(target_tokens) + 1, dtype=np.long)))
+                        (np.zeros(len(source_tokens) + 1, dtype=int),
+                         np.ones(len(target_tokens) + 1, dtype=int)))
                position_id_batch.append(
                    np.stack([position_ids, block_position_ids], axis=0))
            else:

@@ -597,27 +596,25 @@ class ConstructBlockStrategy:
        max_length = max(seq_lengths)
        token_batch = [
            np.concatenate(
-                (tokens, np.zeros(max_length - len(tokens),
-                                  dtype=np.long)))
+                (tokens, np.zeros(max_length - len(tokens), dtype=int)))
            for tokens in token_batch
        ]
        target_batch = [
            np.concatenate(
-                (targets,
-                 np.zeros(max_length - len(targets), dtype=np.long)))
+                (targets, np.zeros(max_length - len(targets), dtype=int)))
            for targets in target_batch
        ]
        loss_mask_batch = [
            np.concatenate(
                (loss_masks,
-                 np.zeros(max_length - len(loss_masks), dtype=np.long)))
+                 np.zeros(max_length - len(loss_masks), dtype=int)))
            for loss_masks in loss_mask_batch
        ]
        position_id_batch = [
-            np.concatenate((position_ids,
-                            np.zeros(
-                                (2, max_length - position_ids.shape[1]),
-                                dtype=np.long)),
-                           axis=1) for position_ids in position_id_batch
+            np.concatenate(
+                (position_ids,
+                 np.zeros(
+                     (2, max_length - position_ids.shape[1]), dtype=int)),
+                axis=1) for position_ids in position_id_batch
        ]
        return token_batch, target_batch, loss_mask_batch, position_id_batch

@@ -583,8 +583,8 @@ class XLDataset(data.Dataset):
    def getidx(self, idx):
        tokens, targets, loss_masks = [], [], []
        attention_mask = np.concatenate(
-            (np.zeros((self.max_seq_len, self.mem_len), dtype=np.long),
-             np.ones((self.max_seq_len, self.max_seq_len), dtype=np.long)),
+            (np.zeros((self.max_seq_len, self.mem_len), dtype=int),
+             np.ones((self.max_seq_len, self.max_seq_len), dtype=int)),
            axis=1)
        sample_idx = bisect_right(self.indices, idx * self.max_seq_len)
        last_end = 0 if sample_idx == 0 else self.indices[sample_idx - 1]

@@ -28,7 +28,7 @@ def main():
    counts = np.array([0] * 10)
    for _ in range(10000):
        spans = strategy.sample_span_in_document(
-            np.array([1, 2, 3, 0, 4, 5, 6, 7, 9, 0], dtype=np.long), [1, 1],
+            np.array([1, 2, 3, 0, 4, 5, 6, 7, 9, 0], dtype=int), [1, 1],
            random.Random())
        for start, end in spans:
            counts[start:end] += 1

@@ -17,7 +17,7 @@ def main():
        num_iters=300000,
        decay_style='cosine',
        decay_ratio=0.1)
-    steps = np.arange(0, 400000, 10, dtype=np.long)
+    steps = np.arange(0, 400000, 10, dtype=int)
    rates = []
    for step in steps:
        lr_scheduler.num_iters = step

@@ -115,7 +115,7 @@ def pad_features(feature: np.ndarray, feature_name: str) -> np.ndarray:
    Returns:
        The feature with an additional padding row.
    """
-    assert feature.dtype != np.dtype(np.string_)
+    assert feature.dtype != np.dtype(np.str_)
    if feature_name in (
            'msa_all_seq',
            'msa_mask_all_seq',

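Worth noting: this hunk is not a pure rename. `np.string_` aliases `np.bytes_` (byte strings, kind 'S'), while `np.str_` is the unicode scalar type (kind 'U'), so the dtype the assert rejects actually changes; a quick sketch of the distinction:

```python
import numpy as np

# The two dtypes are not interchangeable: 'S' is bytes, 'U' is unicode.
assert np.dtype(np.bytes_).kind == 'S'  # what np.string_ aliased
assert np.dtype(np.str_).kind == 'U'    # what the assert now checks
```
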
@@ -1100,9 +1100,9 @@ class HmmsearchHitFeaturizer(TemplateHitFeaturizer):
            np.zeros((1, num_res, residue_constants.atom_type_num, 3),
                     np.float32),
            'template_domain_names':
-            np.array([''.encode()], dtype=np.object),
+            np.array([''.encode()], dtype=np.object_),
            'template_sequence':
-            np.array([''.encode()], dtype=np.object),
+            np.array([''.encode()], dtype=np.object_),
            'template_sum_probs':
            np.array([0], dtype=np.float32),
        }

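`np.object` aliased the builtin `object`; `np.object_` is NumPy's object scalar type, and both resolve to the same `dtype('O')`, so the template feature arrays are unchanged:

```python
import numpy as np

names = np.array([''.encode()], dtype=np.object_)
assert names.dtype == np.dtype('O')  # same dtype np.object produced
```
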
@@ -134,7 +134,7 @@ class FaceReconstructionPipeline(Pipeline):
        img = LoadImage.convert_to_ndarray(input)
        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-        img = img.astype(np.float)
+        img = img.astype(float)
        result = {'img': img}
        return result

@@ -53,7 +53,7 @@ class ImageMattingPipeline(Pipeline):

    def preprocess(self, input: Input) -> Dict[str, Any]:
        img = LoadImage.convert_to_ndarray(input)
-        img = img.astype(np.float)
+        img = img.astype(float)
        result = {'img': img}
        return result

@@ -73,12 +73,12 @@ class ImageStyleTransferPipeline(Pipeline):
        content = LoadImage.convert_to_ndarray(content)
        if len(content.shape) == 2:
            content = cv2.cvtColor(content, cv2.COLOR_GRAY2BGR)
-        content_img = content.astype(np.float)
+        content_img = content.astype(float)

        style_img = LoadImage.convert_to_ndarray(style)
        if len(style_img.shape) == 2:
            style_img = cv2.cvtColor(style_img, cv2.COLOR_GRAY2BGR)
-        style_img = style_img.astype(np.float)
+        style_img = style_img.astype(float)

        result = {'content': content_img, 'style': style_img}
        return result

@@ -105,7 +105,7 @@ class SkinRetouchingPipeline(Pipeline):
        img = LoadImage.convert_to_ndarray(input)
        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-        img = img.astype(np.float)
+        img = img.astype(float)
        result = {'img': img}
        return result

@@ -116,7 +116,7 @@ class TBSDetectionPipeline(Pipeline):
            - **labels** (`List[str]`, optional) -- The boxes's class_names of detected object in image.
        """
        img = LoadImage.convert_to_ndarray(input)
-        img = img.astype(np.float)
+        img = img.astype(float)
        result = {'img': img, 'img_path': input}
        return result

@@ -483,9 +483,9 @@ def numpify_tensor_nested(tensors, reduction=None, clip_value=10000):
        t = np.where(t > clip_value, clip_value, t)
        t = np.where(t < -clip_value, -clip_value, t)
        if reduction == 'sum':
-            return t.sum(dtype=np.float)
+            return t.sum(dtype=float)
        elif reduction == 'mean':
-            return t.mean(dtype=np.float)
+            return t.mean(dtype=float)
        return t
    return tensors

@@ -150,7 +150,7 @@ def compare_arguments_nested(print_content,

    if arg1 is None:
        return True
-    elif isinstance(arg1, (int, str, bool, np.bool, np.integer, np.str)):
+    elif isinstance(arg1, (int, str, bool, np.bool_, np.integer, np.str_)):
        if arg1 != arg2:
            if print_content is not None:
                print(f'{print_content}, arg1:{arg1}, arg2:{arg2}')

@@ -201,10 +201,8 @@ def compare_arguments_nested(print_content,
            return False
        return True
    elif isinstance(arg1, np.ndarray):
-        arg1 = np.where(np.equal(arg1, None), np.NaN,
-                        arg1).astype(dtype=np.float)
-        arg2 = np.where(np.equal(arg2, None), np.NaN,
-                        arg2).astype(dtype=np.float)
+        arg1 = np.where(np.equal(arg1, None), np.NaN, arg1).astype(dtype=float)
+        arg2 = np.where(np.equal(arg2, None), np.NaN, arg2).astype(dtype=float)
        if not all(
                np.isclose(arg1, arg2, rtol=rtol, atol=atol,
                           equal_nan=True).flatten()):

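For the ndarray branch, the None-to-NaN mapping plus `equal_nan=True` is what lets object arrays containing None compare equal. A self-contained sketch of that comparison (values and tolerances are illustrative):

```python
import numpy as np

arg1 = np.array([1.0, None, 3.0], dtype=object)
arg2 = np.array([1.0, None, 3.0], dtype=object)
# Map None -> NaN, cast to float, then treat NaN == NaN while comparing.
a1 = np.where(np.equal(arg1, None), np.nan, arg1).astype(dtype=float)
a2 = np.where(np.equal(arg2, None), np.nan, arg2).astype(dtype=float)
assert all(np.isclose(a1, a2, rtol=1e-5, atol=1e-8, equal_nan=True).flatten())
```
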
@@ -7,6 +7,8 @@ gast>=0.2.2
mmdet<=2.28.2
+numpy<1.24.0
oss2
+# for datasets compatible
pandas<=1.5.3
Pillow>=6.2.0
# pyarrow 9.0.0 introduced event_loop core dump
pyarrow>=6.0.0,!=9.0.0

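The `numpy<1.24.0` pin complements the source changes: NumPy 1.24 is the release that actually removed `np.int`, `np.float`, `np.bool`, `np.object`, and `np.str`, so the pin keeps any not-yet-migrated dependencies working while the aliases are cleaned up here. A hypothetical version guard (ours, not part of the repo) marking that boundary:

```python
import numpy as np

major, minor = (int(x) for x in np.__version__.split('.')[:2])
if (major, minor) >= (1, 24):
    # The deprecated aliases are gone; np.int raises AttributeError.
    assert not hasattr(np, 'int')
else:
    # Still present (with a DeprecationWarning since 1.20).
    assert np.int is int
```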