Resolve issue 2404 #2556

Open · wants to merge 2 commits into master
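
This pull request replaces the removed np.int alias with np.int32. np.int was an alias for the built-in int; it was deprecated in NumPy 1.20 and removed in NumPy 1.24, so every .astype(np.int) or dtype=np.int in the touched files now raises an AttributeError on current NumPy releases. A minimal sketch of the failure and the fix (a standalone illustration, not a line taken from the diff):

    import numpy as np

    lmk = np.array([[10.4, 20.6], [30.2, 40.9]])

    # On NumPy >= 1.24 the removed alias fails:
    #   AttributeError: module 'numpy' has no attribute 'int'
    # lmk_int = np.round(lmk).astype(np.int)

    # Explicit fixed-width dtype, as used throughout this PR:
    lmk_int = np.round(lmk).astype(np.int32)
    print(lmk_int.dtype)  # int32

One caveat: as a dtype, np.int resolved to the platform default integer (usually 64-bit on Linux builds), so np.int32 narrows the integer width there. For pixel coordinates, flags, and masks this is harmless; the notes under cpu_nms.pyx and ds_utils.py below flag the two places where the width may matter.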
2 changes: 1 addition & 1 deletion alignment/coordinate_reg/image_infer.py
@@ -15,7 +15,7 @@
color = (200, 160, 75)
for face in faces:
lmk = face.landmark_2d_106
- lmk = np.round(lmk).astype(np.int)
+ lmk = np.round(lmk).astype(np.int32)
for i in range(lmk.shape[0]):
p = tuple(lmk[i])
cv2.circle(tim, p, 1, color, 1, cv2.LINE_AA)
2 changes: 1 addition & 1 deletion alignment/heatmap/metric.py
@@ -51,7 +51,7 @@ def cal_nme(self, label, pred_label):
ind_gt = np.array(ind_gt)
else:
ind_gt = label[b][p]
- #ind_gt = ind_gt.astype(np.int)
+ #ind_gt = ind_gt.astype(np.int32)
#print(ind_gt)
heatmap_pred = pred_label[b][p]
heatmap_pred = cv2.resize(
2 changes: 1 addition & 1 deletion alignment/synthetics/datasets/augs.py
@@ -24,7 +24,7 @@ def apply(self, image, border_size_limit, **params):
border_size[2] *= image.shape[1]
border_size[1] *= image.shape[0]
border_size[3] *= image.shape[0]
- border_size = border_size.astype(np.int)
+ border_size = border_size.astype(np.int32)
image[:,:border_size[0],:] = self.fill_value
image[:border_size[1],:,:] = self.fill_value
image[:,-border_size[2]:,:] = self.fill_value
4 changes: 2 additions & 2 deletions alignment/synthetics/test_synthetics.py
@@ -85,10 +85,10 @@
outf.write(' ')
outf.write(' '.join(["%.5f"%x for x in kps.flatten()]))
outf.write("\n")
- box = bbox.astype(np.int)
+ box = bbox.astype(np.int32)
color = (0, 0, 255)
cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
- kps = kps.astype(np.int)
+ kps = kps.astype(np.int32)
#print(landmark.shape)
for l in range(kps.shape[0]):
color = (0, 0, 255)
4 changes: 2 additions & 2 deletions alignment/synthetics/tools/prepare_synthetics.py
@@ -48,11 +48,11 @@
_scale = output_size / (max(w, h)*1.5)
aimg, M = face_align.transform(dimg, center, output_size, _scale, rotate)
pred = face_align.trans_points(pred, M)
- #box = bbox.astype(np.int)
+ #box = bbox.astype(np.int32)
#color = (0, 0, 255)
#cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)

- #kps = pred.astype(np.int)
+ #kps = pred.astype(np.int32)
#for l in range(kps.shape[0]):
# color = (0, 0, 255)
# cv2.circle(aimg, (kps[l][0], kps[l][1]), 1, color, 2)
4 changes: 2 additions & 2 deletions body/human_pose/ambiguity_aware/scripts/eval_lsp.py
@@ -93,8 +93,8 @@
else:
color = "darkorange"
cv_color = (89, 141, 252)
- x1, y1 = joints_2d[i].astype(np.int)
- x2, y2 = joints_2d[j].astype(np.int)
+ x1, y1 = joints_2d[i].astype(np.int32)
+ x2, y2 = joints_2d[j].astype(np.int32)

cv2.line(image, (x1, y1), (x2, y2), cv_color, 2)
x1, y1, z1 = joints_3d_pre[i]
4 changes: 2 additions & 2 deletions body/human_pose/ambiguity_aware/scripts/inference.py
@@ -103,8 +103,8 @@
else:
color = "darkorange"
cv_color = (89, 141, 252)
- x1, y1 = joints_2d[i].astype(np.int)
- x2, y2 = joints_2d[j].astype(np.int)
+ x1, y1 = joints_2d[i].astype(np.int32)
+ x2, y2 = joints_2d[j].astype(np.int32)

cv2.line(image, (x1, y1), (x2, y2), cv_color, 2)
x1, y1, z1 = joints_3d_pre[i]
6 changes: 3 additions & 3 deletions detection/retinaface/rcnn/PY_OP/cascade_refine.py
@@ -344,13 +344,13 @@ def forward(self, is_train, req, in_data, out_data, aux):
assert anchors_t1.shape[0] == self.ori_anchors.shape[0]

#for i in range(_gt_boxes.shape[0]):
- # box = _gt_boxes[i].astype(np.int)
+ # box = _gt_boxes[i].astype(np.int32)
# print('%d: gt%d'%(self.nbatch, i), box)
# #color = (0,0,255)
# #cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
#for i in range(anchors_t1.shape[0]):
- # box1 = self.ori_anchors[i].astype(np.int)
- # box2 = anchors_t1[i].astype(np.int)
+ # box1 = self.ori_anchors[i].astype(np.int32)
+ # box2 = anchors_t1[i].astype(np.int32)
# print('%d %d: anchorscompare %d'%(self.nbatch, self.stride, i), box1, box2)
#color = (255,255,0)
#cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
2 changes: 1 addition & 1 deletion detection/retinaface/rcnn/core/loader.py
@@ -247,7 +247,7 @@ def get_batch(self):
print('DEBUG SHAPE', data['data'].shape,
label['gt_boxes'].shape)

- box = label['gt_boxes'].copy()[0][0:4].astype(np.int)
+ box = label['gt_boxes'].copy()[0][0:4].astype(np.int32)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
(0, 255, 0), 2)
filename = './debugout/%d.png' % (self._debug_id)
2 changes: 1 addition & 1 deletion detection/retinaface/rcnn/cython/cpu_nms.pyx
@@ -26,7 +26,7 @@ def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):

cdef int ndets = dets.shape[0]
cdef np.ndarray[np.int_t, ndim=1] suppressed = \
- np.zeros((ndets), dtype=np.int)
+ np.zeros((ndets), dtype=np.int32)

# nominal indices
cdef int _i, _j
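
A note on this hunk (reviewer observation, not part of the diff): the buffer above is declared as np.ndarray[np.int_t, ndim=1], and np.int_t maps to the platform long, which is 64-bit on most Linux builds. If that holds, allocating the array with dtype=np.int32 would trip Cython's buffer dtype check at runtime. A sketch that keeps the declaration and the allocation in sync, assuming a 32-bit suppressed array is the intent:

    cdef int ndets = dets.shape[0]
    # Match the buffer declaration to the fixed-width allocation so the
    # Cython buffer check passes on platforms where long is 64-bit.
    cdef np.ndarray[np.int32_t, ndim=1] suppressed = \
        np.zeros((ndets), dtype=np.int32)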
2 changes: 1 addition & 1 deletion detection/retinaface/rcnn/dataset/ds_utils.py
@@ -4,7 +4,7 @@
def unique_boxes(boxes, scale=1.0):
""" return indices of unique boxes """
v = np.array([1, 1e3, 1e6, 1e9])
- hashes = np.round(boxes * scale).dot(v).astype(np.int)
+ hashes = np.round(boxes * scale).dot(v).astype(np.int32)
_, index = np.unique(hashes, return_index=True)
return np.sort(index)
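
A possible follow-up for this hunk (reviewer observation, not part of the diff): the hash multiplies y2 by 1e9 before the cast, so any box with y2 >= 3 already exceeds the int32 maximum of 2,147,483,647 and the cast can overflow, which could make distinct boxes collide. The old np.int resolved to a 64-bit integer on most Linux builds, which is why this never surfaced. A sketch of the same helper with a 64-bit target, assuming ordinary pixel coordinates:

    import numpy as np

    def unique_boxes(boxes, scale=1.0):
        """ return indices of unique boxes """
        v = np.array([1, 1e3, 1e6, 1e9])
        # The 1e9 * y2 term exceeds the int32 range for y2 >= 3, so cast to
        # int64 to keep the hashes distinct for real image coordinates.
        hashes = np.round(boxes * scale).dot(v).astype(np.int64)
        _, index = np.unique(hashes, return_index=True)
        return np.sort(index)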

24 changes: 12 additions & 12 deletions detection/retinaface/rcnn/io/image.py
@@ -95,7 +95,7 @@ def get_image(roidb, scale=False):
if 'boxes_mask' in roi_rec:
im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy() * im_scale
- boxes_mask = boxes_mask.astype(np.int)
+ boxes_mask = boxes_mask.astype(np.int32)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im_tensor[:, :, m[1]:m[3], m[0]:m[2]] = 0.0
@@ -156,7 +156,7 @@ def __get_crop_image(roidb):
if 'boxes_mask' in roi_rec:
#im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy()
- boxes_mask = boxes_mask.astype(np.int)
+ boxes_mask = boxes_mask.astype(np.int32)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im[m[1]:m[3], m[0]:m[2], :] = 0
@@ -197,7 +197,7 @@ def __get_crop_image(roidb):
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
- box_scale = new_rec['boxes'][box_ind].copy().astype(np.int)
+ box_scale = new_rec['boxes'][box_ind].copy().astype(np.int32)
ul_min = box_scale[2:4] - SIZE
ul_max = box_scale[0:2]
assert ul_min[0] <= ul_max[0]
@@ -215,15 +215,15 @@
im = cv2.warpAffine(im,
M, (SIZE, SIZE),
borderValue=tuple(config.PIXEL_MEANS))
- #tbox = np.array([left, left+SIZE, up, up+SIZE], dtype=np.int)
+ #tbox = np.array([left, left+SIZE, up, up+SIZE], dtype=np.int32)
#im_new = np.zeros( (SIZE, SIZE,3), dtype=im.dtype)
#for i in range(3):
# im_new[:,:,i] = config.PIXEL_MEANS[i]
new_rec['boxes'][:, 0] -= left
new_rec['boxes'][:, 2] -= left
new_rec['boxes'][:, 1] -= up
new_rec['boxes'][:, 3] -= up
- box_trans = new_rec['boxes'][box_ind].copy().astype(np.int)
+ box_trans = new_rec['boxes'][box_ind].copy().astype(np.int32)
#print('sel box', im_scale, box, box_scale, box_trans, file=sys.stderr)
#print('before', new_rec['boxes'].shape[0])
boxes_new = []
@@ -249,7 +249,7 @@ def __get_crop_image(roidb):
if TMP_ID < 10:
tim = im.copy()
for i in range(new_rec['boxes'].shape[0]):
- box = new_rec['boxes'][i].copy().astype(np.int)
+ box = new_rec['boxes'][i].copy().astype(np.int32)
cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
(255, 0, 0), 1)
filename = './trainimages/train%d.png' % TMP_ID
@@ -320,7 +320,7 @@ def get_crop_image1(roidb):
if 'boxes_mask' in roi_rec:
#im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy()
- boxes_mask = boxes_mask.astype(np.int)
+ boxes_mask = boxes_mask.astype(np.int32)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im[m[1]:m[3], m[0]:m[2], :] = 127
@@ -437,7 +437,7 @@ def get_crop_image1(roidb):
if TMP_ID >= 0 and TMP_ID < 10:
tim = im.copy().astype(np.uint8)
for i in range(new_rec['boxes'].shape[0]):
- box = new_rec['boxes'][i].copy().astype(np.int)
+ box = new_rec['boxes'][i].copy().astype(np.int32)
cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
(255, 0, 0), 1)
print('draw box:', box)
@@ -447,7 +447,7 @@ def get_crop_image1(roidb):
if landmark[0][2] < 0:
print('zero', landmark)
continue
- landmark = landmark.astype(np.int)
+ landmark = landmark.astype(np.int32)
print('draw landmark', landmark)
for k in range(5):
color = (0, 0, 255)
@@ -498,7 +498,7 @@ def get_crop_image2(roidb):
if 'boxes_mask' in roi_rec:
#im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy()
- boxes_mask = boxes_mask.astype(np.int)
+ boxes_mask = boxes_mask.astype(np.int32)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im[m[1]:m[3], m[0]:m[2], :] = 0
@@ -697,7 +697,7 @@ def get_crop_image2(roidb):
if TMP_ID >= 0 and TMP_ID < 10:
tim = im.copy().astype(np.uint8)
for i in range(new_rec['boxes'].shape[0]):
- box = new_rec['boxes'][i].copy().astype(np.int)
+ box = new_rec['boxes'][i].copy().astype(np.int32)
cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
(255, 0, 0), 1)
print('draw box:', box)
@@ -707,7 +707,7 @@ def get_crop_image2(roidb):
if landmark[10] == 0.0:
print('zero', landmark)
continue
- landmark = landmark.astype(np.int)
+ landmark = landmark.astype(np.int32)
print('draw landmark', landmark)
for k in range(5):
color = (0, 0, 255)
4 changes: 2 additions & 2 deletions detection/retinaface/test.py
@@ -44,12 +44,12 @@
print('find', faces.shape[0], 'faces')
for i in range(faces.shape[0]):
#print('score', faces[i][4])
- box = faces[i].astype(np.int)
+ box = faces[i].astype(np.int32)
#color = (255,0,0)
color = (0, 0, 255)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
if landmarks is not None:
- landmark5 = landmarks[i].astype(np.int)
+ landmark5 = landmarks[i].astype(np.int32)
#print(landmark.shape)
for l in range(landmark5.shape[0]):
color = (0, 0, 255)
2 changes: 1 addition & 1 deletion detection/retinaface/test_widerface.py
@@ -128,7 +128,7 @@ def get_boxes(roi, pyramid):
font = cv2.FONT_HERSHEY_SIMPLEX
for i in range(boxes.shape[0]):
box = boxes[i]
- ibox = box[0:4].copy().astype(np.int)
+ ibox = box[0:4].copy().astype(np.int32)
cv2.rectangle(im, (ibox[0], ibox[1]), (ibox[2], ibox[3]),
(255, 0, 0), 2)
#print('box', ibox)
4 changes: 2 additions & 2 deletions detection/retinaface_anticov/test.py
@@ -46,7 +46,7 @@
for i in range(faces.shape[0]):
#print('score', faces[i][4])
face = faces[i]
- box = face[0:4].astype(np.int)
+ box = face[0:4].astype(np.int32)
mask = face[5]
print(i, box, mask)
#color = (255,0,0)
@@ -55,7 +55,7 @@
else:
color = (0, 255, 0)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
- landmark5 = landmarks[i].astype(np.int)
+ landmark5 = landmarks[i].astype(np.int32)
#print(landmark.shape)
for l in range(landmark5.shape[0]):
color = (255, 0, 0)
@@ -72,7 +72,7 @@ def sample_via_interval(self, max_overlaps, full_set, num_expected):
tmp_sampled_set = self.random_choice(tmp_inds,
per_num_expected)
else:
- tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
+ tmp_sampled_set = np.array(tmp_inds, dtype=np.int32)
sampled_inds.append(tmp_sampled_set)

sampled_inds = np.concatenate(sampled_inds)
@@ -137,13 +137,13 @@ def _sample_neg(self, assign_result, num_expected, **kwargs):
iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(
- iou_sampling_neg_inds, dtype=np.int)
+ iou_sampling_neg_inds, dtype=np.int32)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = self.random_choice(
floor_neg_inds, num_expected_floor)
else:
- sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
+ sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int32)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
6 changes: 3 additions & 3 deletions detection/scrfd/mmdet/core/evaluation/widerface.py
@@ -317,7 +317,7 @@ def image_eval(pred, gt, ignore, iou_thresh, mpp):

def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
pr_info = np.zeros((thresh_num, 2)).astype('float')
- fp = np.zeros((pred_info.shape[0],), dtype=np.int)
+ fp = np.zeros((pred_info.shape[0],), dtype=np.int32)
last_info = [-1, -1]
for t in range(thresh_num):

@@ -429,7 +429,7 @@ def wider_evaluation(pred, gt_path, iou_thresh=0.5, debug=False):
#if len(keep_index) != 0:
# ignore[keep_index-1] = 1
#assert len(keep_index)>0
- ignore = np.zeros(gt_boxes.shape[0], dtype=np.int)
+ ignore = np.zeros(gt_boxes.shape[0], dtype=np.int32)
if len(keep_index) != 0:
ignore[keep_index-1] = 1
pred_info = np_round(pred_info,1)
@@ -523,7 +523,7 @@ def get_widerface_gts(gt_path):
#if len(keep_index) != 0:
# ignore[keep_index-1] = 1
#assert len(keep_index)>0
- #ignore = np.zeros(gt_boxes.shape[0], dtype=np.int)
+ #ignore = np.zeros(gt_boxes.shape[0], dtype=np.int32)
#if len(keep_index) != 0:
# ignore[keep_index-1] = 1
#print('ignore:', len(ignore), len(np.where(ignore==1)[0]))
2 changes: 1 addition & 1 deletion detection/scrfd/mmdet/datasets/custom.py
@@ -139,7 +139,7 @@ def get_cat_ids(self, idx):
list[int]: All categories in the image of specified index.
"""

- return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
+ return self.data_infos[idx]['ann']['labels'].astype(np.int32).tolist()

def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
2 changes: 1 addition & 1 deletion detection/scrfd/mmdet/datasets/pipelines/transforms.py
@@ -907,7 +907,7 @@ def __call__(self, results):
top = random.randint(h - ch, 0)

patch = np.array(
- (int(left), int(top), int(left + cw), int(top + ch)), dtype=np.int)
+ (int(left), int(top), int(left + cw), int(top + ch)), dtype=np.int32)

# center of boxes should inside the crop img
# only adjust boxes and instance masks when the gt is not empty
@@ -262,7 +262,7 @@ def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
>>> labels = torch.randint(0, 2, (n_roi,)).long()
>>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
>>> # For each image, pretend random positive boxes are gts
- >>> is_label_pos = (labels.numpy() > 0).astype(np.int)
+ >>> is_label_pos = (labels.numpy() > 0).astype(np.int32)
>>> lbl_per_img = kwarray.group_items(is_label_pos,
... img_ids.numpy())
>>> pos_per_img = [sum(lbl_per_img.get(gid, []))
4 changes: 2 additions & 2 deletions detection/scrfd/tools/scrfd.py
@@ -324,12 +324,12 @@ def scrfd_2p5gkps(**kwargs):
print(kpss.shape)
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
- x1,y1,x2,y2,score = bbox.astype(np.int)
+ x1,y1,x2,y2,score = bbox.astype(np.int32)
cv2.rectangle(img, (x1,y1) , (x2,y2) , (255,0,0) , 2)
if kpss is not None:
kps = kpss[i]
for kp in kps:
- kp = kp.astype(np.int)
+ kp = kp.astype(np.int32)
cv2.circle(img, tuple(kp) , 1, (0,0,255) , 2)
filename = img_path.split('/')[-1]
print('output:', filename)
4 changes: 2 additions & 2 deletions examples/person_detection/scrfd_person.py
@@ -11,8 +11,8 @@

def detect_person(img, detector):
bboxes, kpss = detector.detect(img)
- bboxes = np.round(bboxes[:,:4]).astype(np.int)
- kpss = np.round(kpss).astype(np.int)
+ bboxes = np.round(bboxes[:,:4]).astype(np.int32)
+ kpss = np.round(kpss).astype(np.int32)
kpss[:,:,0] = np.clip(kpss[:,:,0], 0, img.shape[1])
kpss[:,:,1] = np.clip(kpss[:,:,1], 0, img.shape[0])
vbboxes = bboxes.copy()
4 changes: 2 additions & 2 deletions python-package/insightface/model_zoo/scrfd.py
@@ -335,12 +335,12 @@ def scrfd_2p5gkps(**kwargs):
print(kpss.shape)
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
- x1,y1,x2,y2,score = bbox.astype(np.int)
+ x1,y1,x2,y2,score = bbox.astype(np.int32)
cv2.rectangle(img, (x1,y1) , (x2,y2) , (255,0,0) , 2)
if kpss is not None:
kps = kpss[i]
for kp in kps:
- kp = kp.astype(np.int)
+ kp = kp.astype(np.int32)
cv2.circle(img, tuple(kp) , 1, (0,0,255) , 2)
filename = img_path.split('/')[-1]
print('output:', filename)