diff --git a/.idea/CASD.iml b/.idea/CASD.iml
new file mode 100644
index 0000000..7c9d48f
--- /dev/null
+++ b/.idea/CASD.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/encodings.xml b/.idea/encodings.xml
new file mode 100644
index 0000000..15a15b2
--- /dev/null
+++ b/.idea/encodings.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..cf7d47d
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..99d4ae6
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 0000000..01df77f
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,259 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ self._losses['cls_det_loss']
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 1607204328623
+
+
+ 1607204328623
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/experiments/cfgs/change_log.txt b/experiments/cfgs/change_log.txt
new file mode 100644
index 0000000..23d68d5
--- /dev/null
+++ b/experiments/cfgs/change_log.txt
@@ -0,0 +1,48 @@
+2018/11/22/9:37
+In the vgg16.yml files, the original TEST.NMS value is 0.3 but changed to 0.4 for
+implementation for WSDNN
+
+2018/11/23/14:19
+In vgg16.yml files, the original POOLING_MODE == crop, however changed to roi_align
+for implementation for WSDNN
+
+2018/11/24/8:37
+In vgg16.yml file, the original TRAIN.WEIGHT_DECAY == 0.0001, and changed to 0.005
+Also ss_boxes whose widths or heights are less than 20 are removed
+
+2018/11/24/9:01
+In vgg16.yml files, the TRAIN.SCALES is {600,}, however changed to {480, 576, 688, 864, 1200}
+TEST.SCALES is {688, }
+
+2018/11/24/9:23
+In vgg16.yml file, the TRAIN.MAX_SIZE is 1000, however changed to 1200
+
+2018/11/24/20:33
+In lib/datasets/pascal_voc.py, changed classes from 21 classes to 20 classes
+original:
+self._classes = ('__background__', # always index 0
+ 'aeroplane', 'bicycle', 'bird', 'boat',
+ 'bottle', 'bus', 'car', 'cat', 'chair',
+ 'cow', 'diningtable', 'dog', 'horse',
+ 'motorbike', 'person', 'pottedplant',
+ 'sheep', 'sofa', 'train', 'tvmonitor')
+new:
+self._classes = (
+ 'aeroplane', 'bicycle', 'bird', 'boat',
+ 'bottle', 'bus', 'car', 'cat', 'chair',
+ 'cow', 'diningtable', 'dog', 'horse',
+ 'motorbike', 'person', 'pottedplant',
+ 'sheep', 'sofa', 'train', 'tvmonitor')
+In network.py, added a score_det_net
+
+In vgg16.yml file, changed TRAIN.STEPSIZE from 30000 to 60000
+
+
+2018/11/25/9:59
+In vgg16.yml, the learning rate is 0.0001, and changed to 0.0005.
+
+2018/11/26/11:14
+In vgg16.yml, the TEST.SCALES changed from [688] to [480, 576, 688, 864, 1200]
+for multiple scale test
+
+
diff --git a/experiments/cfgs/mobile.yml b/experiments/cfgs/mobile.yml
new file mode 100644
index 0000000..8adbf94
--- /dev/null
+++ b/experiments/cfgs/mobile.yml
@@ -0,0 +1,16 @@
+EXP_DIR: mobile
+TRAIN:
+ HAS_RPN: True
+ IMS_PER_BATCH: 1
+ BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
+ RPN_POSITIVE_OVERLAP: 0.7
+ RPN_BATCHSIZE: 256
+ PROPOSAL_METHOD: gt
+ BG_THRESH_LO: 0.0
+ DISPLAY: 20
+ BATCH_SIZE: 256
+ DOUBLE_BIAS: False
+ SNAPSHOT_PREFIX: mobile_faster_rcnn
+TEST:
+ HAS_RPN: True
+POOLING_MODE: crop
diff --git a/experiments/cfgs/res101-lg.yml b/experiments/cfgs/res101-lg.yml
new file mode 100644
index 0000000..ce4aefb
--- /dev/null
+++ b/experiments/cfgs/res101-lg.yml
@@ -0,0 +1,22 @@
+EXP_DIR: res101-lg
+TRAIN:
+ HAS_RPN: True
+ IMS_PER_BATCH: 1
+ BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
+ RPN_POSITIVE_OVERLAP: 0.7
+ RPN_BATCHSIZE: 256
+ PROPOSAL_METHOD: gt
+ BG_THRESH_LO: 0.0
+ DISPLAY: 20
+ BATCH_SIZE: 256
+ DOUBLE_BIAS: False
+ SNAPSHOT_PREFIX: res101_faster_rcnn
+ SCALES: [800]
+ MAX_SIZE: 1333
+TEST:
+ HAS_RPN: True
+ SCALES: [800]
+ MAX_SIZE: 1333
+ RPN_POST_NMS_TOP_N: 1000
+POOLING_MODE: crop
+ANCHOR_SCALES: [2,4,8,16,32]
diff --git a/experiments/cfgs/res101.yml b/experiments/cfgs/res101.yml
new file mode 100644
index 0000000..367cad3
--- /dev/null
+++ b/experiments/cfgs/res101.yml
@@ -0,0 +1,16 @@
+EXP_DIR: res101
+TRAIN:
+ HAS_RPN: True
+ IMS_PER_BATCH: 1
+ BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
+ RPN_POSITIVE_OVERLAP: 0.7
+ RPN_BATCHSIZE: 256
+ PROPOSAL_METHOD: gt
+ BG_THRESH_LO: 0.0
+ DISPLAY: 20
+ BATCH_SIZE: 256
+ DOUBLE_BIAS: False
+ SNAPSHOT_PREFIX: res101_faster_rcnn
+TEST:
+ HAS_RPN: True
+POOLING_MODE: crop
diff --git a/experiments/cfgs/res50.yml b/experiments/cfgs/res50.yml
new file mode 100644
index 0000000..7b38b2a
--- /dev/null
+++ b/experiments/cfgs/res50.yml
@@ -0,0 +1,16 @@
+EXP_DIR: res50
+TRAIN:
+ HAS_RPN: True
+ IMS_PER_BATCH: 1
+ BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
+ RPN_POSITIVE_OVERLAP: 0.7
+ RPN_BATCHSIZE: 256
+ PROPOSAL_METHOD: gt
+ BG_THRESH_LO: 0.0
+ DISPLAY: 20
+ BATCH_SIZE: 256
+ DOUBLE_BIAS: False
+ SNAPSHOT_PREFIX: res50_faster_rcnn
+TEST:
+ HAS_RPN: True
+POOLING_MODE: crop
diff --git a/experiments/cfgs/vgg16.yml b/experiments/cfgs/vgg16.yml
new file mode 100644
index 0000000..d13ede6
--- /dev/null
+++ b/experiments/cfgs/vgg16.yml
@@ -0,0 +1,25 @@
+EXP_DIR: vgg16_MELM
+TRAIN:
+ HAS_RPN: False
+ IMS_PER_BATCH: 1
+ BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
+ RPN_POSITIVE_OVERLAP: 0.7
+ RPN_BATCHSIZE: 256
+ PROPOSAL_METHOD: selective_search
+ BG_THRESH_LO: 0.0
+ DISPLAY: 20
+ BATCH_SIZE: 256
+ SNAPSHOT_PREFIX: vgg16_MELM
+ LEARNING_RATE: 0.001 # 0.001
+ WEIGHT_DECAY: 0.0005
+ SCALES: [480, 576, 688, 864, 1200]
+ MAX_SIZE: 2000
+ STEPSIZE: [50000,]
+ MIL_RECURRENT_STEP: 20000
+ MIL_RECURRECT_WEIGHT: 0.09
+TEST:
+ HAS_RPN: False
+ PROPOSAL_METHOD: selective_search
+ NMS: 0.3
+ SCALES: [480, 576, 688, 864, 1200]
+POOLING_MODE: roi_align
diff --git a/experiments/scripts/convert_vgg16.sh b/experiments/scripts/convert_vgg16.sh
new file mode 100644
index 0000000..7fae1b4
--- /dev/null
+++ b/experiments/scripts/convert_vgg16.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+set -x
+set -e
+
+export PYTHONUNBUFFERED="True"
+
+GPU_ID=$1
+DATASET=$2
+NET=vgg16
+
+array=( $@ )
+len=${#array[@]}
+EXTRA_ARGS=${array[@]:2:$len}
+EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}
+
+case ${DATASET} in
+ pascal_voc)
+ TRAIN_IMDB="voc_2007_trainval"
+ TEST_IMDB="voc_2007_test"
+ ITERS=70000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ pascal_voc_0712)
+ TRAIN_IMDB="voc_2007_trainval+voc_2012_trainval"
+ TEST_IMDB="voc_2007_test"
+ ITERS=110000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ coco)
+ TRAIN_IMDB="coco_2014_train+coco_2014_valminusminival"
+ TEST_IMDB="coco_2014_minival"
+ ITERS=490000
+ ANCHORS="[4,8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ *)
+ echo "No dataset given"
+ exit
+ ;;
+esac
+
+set +x
+NET_FINAL=${NET}_faster_rcnn_iter_${ITERS}
+set -x
+
+if [ ! -f ${NET_FINAL}.index ]; then
+ if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ CUDA_VISIBLE_DEVICES=${GPU_ID} time python ./tools/convert_from_depre.py \
+ --snapshot ${NET_FINAL} \
+ --imdb ${TRAIN_IMDB} \
+ --iters ${ITERS} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --tag ${EXTRA_ARGS_SLUG} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} ${EXTRA_ARGS}
+ else
+ CUDA_VISIBLE_DEVICES=${GPU_ID} time python ./tools/convert_from_depre.py \
+ --snapshot ${NET_FINAL} \
+ --imdb ${TRAIN_IMDB} \
+ --iters ${ITERS} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} ${EXTRA_ARGS}
+ fi
+fi
+
diff --git a/experiments/scripts/test_faster_rcnn.sh b/experiments/scripts/test_faster_rcnn.sh
new file mode 100644
index 0000000..d613656
--- /dev/null
+++ b/experiments/scripts/test_faster_rcnn.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+set -x
+set -e
+
+export PYTHONUNBUFFERED="True"
+
+GPU_ID=$1
+DATASET=$2
+NET=$3
+
+array=( $@ )
+len=${#array[@]}
+EXTRA_ARGS=${array[@]:3:$len}
+EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}
+
+case ${DATASET} in
+ pascal_voc)
+ TRAIN_IMDB="voc_2007_trainval"
+ TEST_IMDB="voc_2007_test"
+ ITERS=100000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ pascal_voc_0712)
+ TRAIN_IMDB="voc_2007_trainval+voc_2012_trainval"
+ TEST_IMDB="voc_2007_test"
+ ITERS=110000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ coco)
+ TRAIN_IMDB="coco_2014_train+coco_2014_valminusminival"
+ TEST_IMDB="coco_2014_minival"
+ ITERS=490000
+ ANCHORS="[4,8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ *)
+ echo "No dataset given"
+ exit
+ ;;
+esac
+
+LOG="experiments/logs/test_${NET}_${TRAIN_IMDB}_${EXTRA_ARGS_SLUG}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
+exec &> >(tee -a "$LOG")
+echo Logging output to "$LOG"
+
+set +x
+if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ NET_FINAL=output/${NET}_MELM/${TRAIN_IMDB}/${EXTRA_ARGS_SLUG}/${NET}_MELM_iter_${ITERS}.pth
+else
+ NET_FINAL=output/${NET}_MELM/${TRAIN_IMDB}/default/${NET}_MELM_iter_${ITERS}.pth
+
+fi
+set -x
+
+if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/test_net.py \
+ --imdb ${TEST_IMDB} \
+ --model ${NET_FINAL} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --tag ${EXTRA_ARGS_SLUG} \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ ${EXTRA_ARGS}
+else
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/test_net.py \
+ --imdb ${TEST_IMDB} \
+ --model ${NET_FINAL} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ ${EXTRA_ARGS}
+fi
+
diff --git a/experiments/scripts/test_faster_rcnn_notime.sh b/experiments/scripts/test_faster_rcnn_notime.sh
new file mode 100644
index 0000000..53f00e3
--- /dev/null
+++ b/experiments/scripts/test_faster_rcnn_notime.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+set -x
+set -e
+
+export PYTHONUNBUFFERED="True"
+
+GPU_ID=$1
+DATASET=$2
+NET=$3
+
+array=( $@ )
+len=${#array[@]}
+EXTRA_ARGS=${array[@]:3:$len}
+EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}
+
+case ${DATASET} in
+ pascal_voc)
+ TRAIN_IMDB="voc_2007_trainval"
+ TEST_IMDB="voc_2007_test"
+ ITERS=70000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ pascal_voc_0712)
+ TRAIN_IMDB="voc_2007_trainval+voc_2012_trainval"
+ TEST_IMDB="voc_2007_test"
+ ITERS=110000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ coco)
+ TRAIN_IMDB="coco_2014_train+coco_2014_valminusminival"
+ TEST_IMDB="coco_2014_minival"
+ ITERS=490000
+ ANCHORS="[4,8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ *)
+ echo "No dataset given"
+ exit
+ ;;
+esac
+
+LOG="experiments/logs/test_${NET}_${TRAIN_IMDB}_${EXTRA_ARGS_SLUG}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
+exec &> >(tee -a "$LOG")
+echo Logging output to "$LOG"
+
+set +x
+if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ NET_FINAL=output/${NET}/${TRAIN_IMDB}/${EXTRA_ARGS_SLUG}/${NET}_faster_rcnn_iter_${ITERS}.pth
+else
+ NET_FINAL=output/${NET}/${TRAIN_IMDB}/default/${NET}_faster_rcnn_iter_${ITERS}.pth
+fi
+set -x
+
+if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/test_net.py \
+ --imdb ${TEST_IMDB} \
+ --model ${NET_FINAL} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --tag ${EXTRA_ARGS_SLUG} \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ ${EXTRA_ARGS}
+else
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/test_net.py \
+ --imdb ${TEST_IMDB} \
+ --model ${NET_FINAL} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ ${EXTRA_ARGS}
+fi
+
diff --git a/experiments/scripts/train_faster_rcnn.sh b/experiments/scripts/train_faster_rcnn.sh
new file mode 100644
index 0000000..ca1a081
--- /dev/null
+++ b/experiments/scripts/train_faster_rcnn.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+alias time='/usr/bin/time'
+
+set -x
+set -e
+
+export PYTHONUNBUFFERED="True"
+
+GPU_ID=$1
+DATASET=$2
+NET=$3
+
+array=( $@ )
+len=${#array[@]}
+EXTRA_ARGS=${array[@]:3:$len}
+EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}
+
+case ${DATASET} in
+ pascal_voc)
+ TRAIN_IMDB="voc_2007_trainval"
+ TEST_IMDB="voc_2007_test"
+ STEPSIZE="[50000]"
+ ITERS=100000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ pascal_voc_0712)
+ TRAIN_IMDB="voc_2007_trainval+voc_2012_trainval"
+ TEST_IMDB="voc_2007_test"
+ STEPSIZE="[80000]"
+ ITERS=110000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ coco)
+ TRAIN_IMDB="coco_2014_train+coco_2014_valminusminival"
+ TEST_IMDB="coco_2014_minival"
+ STEPSIZE="[350000]"
+ ITERS=490000
+ ANCHORS="[4,8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ *)
+ echo "No dataset given"
+ exit
+ ;;
+esac
+
+LOG="experiments/logs/${NET}_${TRAIN_IMDB}_${EXTRA_ARGS_SLUG}_${NET}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
+exec &> >(tee -a "$LOG")
+echo Logging output to "$LOG"
+
+set +x
+if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ NET_FINAL=output/${NET}/${TRAIN_IMDB}/${EXTRA_ARGS_SLUG}/${NET}_MELM_iter_${ITERS}.pth
+else
+ NET_FINAL=output/${NET}/${TRAIN_IMDB}/default/${NET}_MELM_iter_${ITERS}.pth
+fi
+set -x
+
+if [ ! -f ${NET_FINAL}.index ]; then
+ if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/trainval_net.py \
+ --weight data/imagenet_weights/${NET}.pth \
+ --imdb ${TRAIN_IMDB} \
+ --imdbval ${TEST_IMDB} \
+ --iters ${ITERS} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --tag ${EXTRA_ARGS_SLUG} \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ TRAIN.STEPSIZE ${STEPSIZE} ${EXTRA_ARGS}
+ else
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/trainval_net.py \
+ --weight data/imagenet_weights/${NET}.pth \
+ --imdb ${TRAIN_IMDB} \
+ --imdbval ${TEST_IMDB} \
+ --iters ${ITERS} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ TRAIN.STEPSIZE ${STEPSIZE} ${EXTRA_ARGS}
+ fi
+fi
+
+echo $@
+./experiments/scripts/test_faster_rcnn.sh $@
diff --git a/experiments/scripts/train_faster_rcnn_notime.sh b/experiments/scripts/train_faster_rcnn_notime.sh
new file mode 100644
index 0000000..eddd772
--- /dev/null
+++ b/experiments/scripts/train_faster_rcnn_notime.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+set -x
+set -e
+
+export PYTHONUNBUFFERED="True"
+
+GPU_ID=$1
+DATASET=$2
+NET=$3
+
+array=( $@ )
+len=${#array[@]}
+EXTRA_ARGS=${array[@]:3:$len}
+EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}
+
+case ${DATASET} in
+ pascal_voc)
+ TRAIN_IMDB="voc_2007_trainval"
+ TEST_IMDB="voc_2007_test"
+ STEPSIZE="[50000]"
+ ITERS=70000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ pascal_voc_0712)
+ TRAIN_IMDB="voc_2007_trainval+voc_2012_trainval"
+ TEST_IMDB="voc_2007_test"
+ STEPSIZE="[80000]"
+ ITERS=110000
+ ANCHORS="[8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ coco)
+ TRAIN_IMDB="coco_2014_train+coco_2014_valminusminival"
+ TEST_IMDB="coco_2014_minival"
+ STEPSIZE="[900000]"
+ ITERS=1190000
+ ANCHORS="[4,8,16,32]"
+ RATIOS="[0.5,1,2]"
+ ;;
+ *)
+ echo "No dataset given"
+ exit
+ ;;
+esac
+
+LOG="experiments/logs/${NET}_${TRAIN_IMDB}_${EXTRA_ARGS_SLUG}_${NET}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
+exec &> >(tee -a "$LOG")
+echo Logging output to "$LOG"
+
+set +x
+if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ NET_FINAL=output/${NET}/${TRAIN_IMDB}/${EXTRA_ARGS_SLUG}/${NET}_faster_rcnn_iter_${ITERS}.pth
+else
+ NET_FINAL=output/${NET}/${TRAIN_IMDB}/default/${NET}_faster_rcnn_iter_${ITERS}.pth
+fi
+set -x
+
+if [ ! -f ${NET_FINAL}.index ]; then
+ if [[ ! -z ${EXTRA_ARGS_SLUG} ]]; then
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/trainval_net.py \
+ --weight data/imagenet_weights/${NET}.pth \
+ --imdb ${TRAIN_IMDB} \
+ --imdbval ${TEST_IMDB} \
+ --iters ${ITERS} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --tag ${EXTRA_ARGS_SLUG} \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ TRAIN.STEPSIZE ${STEPSIZE} ${EXTRA_ARGS}
+ else
+ CUDA_VISIBLE_DEVICES=${GPU_ID} python ./tools/trainval_net.py \
+ --weight data/imagenet_weights/${NET}.pth \
+ --imdb ${TRAIN_IMDB} \
+ --imdbval ${TEST_IMDB} \
+ --iters ${ITERS} \
+ --cfg experiments/cfgs/${NET}.yml \
+ --net ${NET} \
+ --set ANCHOR_SCALES ${ANCHORS} ANCHOR_RATIOS ${RATIOS} \
+ TRAIN.STEPSIZE ${STEPSIZE} ${EXTRA_ARGS}
+ fi
+fi
+
+./experiments/scripts/test_faster_rcnn_notime.sh $@
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..64e309d
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..f3e785a
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..39168d0
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/roi_align/roi_align_cuda.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/roi_align/roi_align_cuda.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..a5d45b7
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/roi_align/roi_align_cuda.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/roi_crop/roi_crop_cpu.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/roi_crop/roi_crop_cpu.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..f4bcc00
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/roi_crop/roi_crop_cpu.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/roi_crop/roi_crop_cuda.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/roi_crop/roi_crop_cuda.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..7e4de05
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/roi_crop/roi_crop_cuda.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/roi_pool/roi_pool_cuda.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/roi_pool/roi_pool_cuda.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..25aed8a
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/roi_pool/roi_pool_cuda.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/lib.linux-x86_64-3.7/ops/roi_ring_pool/roi_ring_pool_cuda.cpython-37m-x86_64-linux-gnu.so b/lib/build/lib.linux-x86_64-3.7/ops/roi_ring_pool/roi_ring_pool_cuda.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..69ee1a3
Binary files /dev/null and b/lib/build/lib.linux-x86_64-3.7/ops/roi_ring_pool/roi_ring_pool_cuda.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_cpu.o b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_cpu.o
new file mode 100644
index 0000000..a657583
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_cpu.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_cuda.o b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_cuda.o
new file mode 100644
index 0000000..85fd57c
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_cuda.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_kernel.o b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_kernel.o
new file mode 100644
index 0000000..3e3b755
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/nms_kernel.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/nms/src/soft_nms_cpu.o b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/soft_nms_cpu.o
new file mode 100644
index 0000000..655e012
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/nms/src/soft_nms_cpu.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_align/src/roi_align_cuda.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_align/src/roi_align_cuda.o
new file mode 100644
index 0000000..9a0771f
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_align/src/roi_align_cuda.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_align/src/roi_align_kernel.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_align/src/roi_align_kernel.o
new file mode 100644
index 0000000..b75ed35
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_align/src/roi_align_kernel.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_cpu.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_cpu.o
new file mode 100644
index 0000000..36c508b
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_cpu.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_cuda.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_cuda.o
new file mode 100644
index 0000000..890af9b
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_cuda.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_kernel.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_kernel.o
new file mode 100644
index 0000000..3e568d7
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_crop/src/roi_crop_kernel.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_pool/src/roi_pool_cuda.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_pool/src/roi_pool_cuda.o
new file mode 100644
index 0000000..3df3ee6
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_pool/src/roi_pool_cuda.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_pool/src/roi_pool_kernel.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_pool/src/roi_pool_kernel.o
new file mode 100644
index 0000000..c9c9a61
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_pool/src/roi_pool_kernel.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_ring_pool/src/roi_ring_pool_cuda.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_ring_pool/src/roi_ring_pool_cuda.o
new file mode 100644
index 0000000..53d8a5d
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_ring_pool/src/roi_ring_pool_cuda.o differ
diff --git a/lib/build/temp.linux-x86_64-3.7/ops/roi_ring_pool/src/roi_ring_pool_kernel.o b/lib/build/temp.linux-x86_64-3.7/ops/roi_ring_pool/src/roi_ring_pool_kernel.o
new file mode 100644
index 0000000..810d532
Binary files /dev/null and b/lib/build/temp.linux-x86_64-3.7/ops/roi_ring_pool/src/roi_ring_pool_kernel.o differ
diff --git a/lib/datasets/VOCdevkit-matlab-wrapper/get_voc_opts.m b/lib/datasets/VOCdevkit-matlab-wrapper/get_voc_opts.m
new file mode 100644
index 0000000..629597a
--- /dev/null
+++ b/lib/datasets/VOCdevkit-matlab-wrapper/get_voc_opts.m
@@ -0,0 +1,14 @@
+function VOCopts = get_voc_opts(path)
+
+tmp = pwd;
+cd(path);
+try
+ addpath('VOCcode');
+ VOCinit;
+catch
+ rmpath('VOCcode');
+ cd(tmp);
+ error(sprintf('VOCcode directory not found under %s', path));
+end
+rmpath('VOCcode');
+cd(tmp);
diff --git a/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m b/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m
new file mode 100644
index 0000000..1911a0e
--- /dev/null
+++ b/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m
@@ -0,0 +1,56 @@
+function res = voc_eval(path, comp_id, test_set, output_dir)
+
+VOCopts = get_voc_opts(path);
+VOCopts.testset = test_set;
+
+for i = 1:length(VOCopts.classes)
+ cls = VOCopts.classes{i};
+ res(i) = voc_eval_cls(cls, VOCopts, comp_id, output_dir);
+end
+
+fprintf('\n~~~~~~~~~~~~~~~~~~~~\n');
+fprintf('Results:\n');
+aps = [res(:).ap]';
+fprintf('%.1f\n', aps * 100);
+fprintf('%.1f\n', mean(aps) * 100);
+fprintf('~~~~~~~~~~~~~~~~~~~~\n');
+
+function res = voc_eval_cls(cls, VOCopts, comp_id, output_dir)
+
+test_set = VOCopts.testset;
+year = VOCopts.dataset(4:end);
+
+addpath(fullfile(VOCopts.datadir, 'VOCcode'));
+
+res_fn = sprintf(VOCopts.detrespath, comp_id, cls);
+
+recall = [];
+prec = [];
+ap = 0;
+ap_auc = 0;
+
+do_eval = (str2num(year) <= 2007) | ~strcmp(test_set, 'test');
+if do_eval
+ % Bug in VOCevaldet requires that tic has been called first
+ tic;
+ [recall, prec, ap] = VOCevaldet(VOCopts, comp_id, cls, true);
+ ap_auc = xVOCap(recall, prec);
+
+ % force plot limits
+ ylim([0 1]);
+ xlim([0 1]);
+
+ print(gcf, '-djpeg', '-r0', ...
+ [output_dir '/' cls '_pr.jpg']);
+end
+fprintf('!!! %s : %.4f %.4f\n', cls, ap, ap_auc);
+
+res.recall = recall;
+res.prec = prec;
+res.ap = ap;
+res.ap_auc = ap_auc;
+
+save([output_dir '/' cls '_pr.mat'], ...
+ 'res', 'recall', 'prec', 'ap', 'ap_auc');
+
+rmpath(fullfile(VOCopts.datadir, 'VOCcode'));
diff --git a/lib/datasets/VOCdevkit-matlab-wrapper/xVOCap.m b/lib/datasets/VOCdevkit-matlab-wrapper/xVOCap.m
new file mode 100644
index 0000000..de6c628
--- /dev/null
+++ b/lib/datasets/VOCdevkit-matlab-wrapper/xVOCap.m
@@ -0,0 +1,10 @@
+function ap = xVOCap(rec,prec)
+% From the PASCAL VOC 2011 devkit
+
+mrec=[0 ; rec ; 1];
+mpre=[0 ; prec ; 0];
+for i=numel(mpre)-1:-1:1
+ mpre(i)=max(mpre(i),mpre(i+1));
+end
+i=find(mrec(2:end)~=mrec(1:end-1))+1;
+ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
diff --git a/lib/datasets/__init__.py b/lib/datasets/__init__.py
new file mode 100644
index 0000000..7ba6a65
--- /dev/null
+++ b/lib/datasets/__init__.py
@@ -0,0 +1,6 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
diff --git a/lib/datasets/__pycache__/__init__.cpython-36.pyc b/lib/datasets/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..1c88744
Binary files /dev/null and b/lib/datasets/__pycache__/__init__.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/__init__.cpython-37.pyc b/lib/datasets/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..46d1ce0
Binary files /dev/null and b/lib/datasets/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/datasets/__pycache__/coco.cpython-36.pyc b/lib/datasets/__pycache__/coco.cpython-36.pyc
new file mode 100644
index 0000000..e35f6bb
Binary files /dev/null and b/lib/datasets/__pycache__/coco.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/coco.cpython-37.pyc b/lib/datasets/__pycache__/coco.cpython-37.pyc
new file mode 100644
index 0000000..469143c
Binary files /dev/null and b/lib/datasets/__pycache__/coco.cpython-37.pyc differ
diff --git a/lib/datasets/__pycache__/ds_utils.cpython-36.pyc b/lib/datasets/__pycache__/ds_utils.cpython-36.pyc
new file mode 100644
index 0000000..223a180
Binary files /dev/null and b/lib/datasets/__pycache__/ds_utils.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/ds_utils.cpython-37.pyc b/lib/datasets/__pycache__/ds_utils.cpython-37.pyc
new file mode 100644
index 0000000..dd7faeb
Binary files /dev/null and b/lib/datasets/__pycache__/ds_utils.cpython-37.pyc differ
diff --git a/lib/datasets/__pycache__/factory.cpython-36.pyc b/lib/datasets/__pycache__/factory.cpython-36.pyc
new file mode 100644
index 0000000..9326172
Binary files /dev/null and b/lib/datasets/__pycache__/factory.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/factory.cpython-37.pyc b/lib/datasets/__pycache__/factory.cpython-37.pyc
new file mode 100644
index 0000000..748a431
Binary files /dev/null and b/lib/datasets/__pycache__/factory.cpython-37.pyc differ
diff --git a/lib/datasets/__pycache__/imdb.cpython-36.pyc b/lib/datasets/__pycache__/imdb.cpython-36.pyc
new file mode 100644
index 0000000..545cfe1
Binary files /dev/null and b/lib/datasets/__pycache__/imdb.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/imdb.cpython-37.pyc b/lib/datasets/__pycache__/imdb.cpython-37.pyc
new file mode 100644
index 0000000..2069294
Binary files /dev/null and b/lib/datasets/__pycache__/imdb.cpython-37.pyc differ
diff --git a/lib/datasets/__pycache__/pascal_voc.cpython-36.pyc b/lib/datasets/__pycache__/pascal_voc.cpython-36.pyc
new file mode 100644
index 0000000..559fb14
Binary files /dev/null and b/lib/datasets/__pycache__/pascal_voc.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/pascal_voc.cpython-37.pyc b/lib/datasets/__pycache__/pascal_voc.cpython-37.pyc
new file mode 100644
index 0000000..9ba1390
Binary files /dev/null and b/lib/datasets/__pycache__/pascal_voc.cpython-37.pyc differ
diff --git a/lib/datasets/__pycache__/voc_eval.cpython-36.pyc b/lib/datasets/__pycache__/voc_eval.cpython-36.pyc
new file mode 100644
index 0000000..c8147d2
Binary files /dev/null and b/lib/datasets/__pycache__/voc_eval.cpython-36.pyc differ
diff --git a/lib/datasets/__pycache__/voc_eval.cpython-37.pyc b/lib/datasets/__pycache__/voc_eval.cpython-37.pyc
new file mode 100644
index 0000000..f899d4a
Binary files /dev/null and b/lib/datasets/__pycache__/voc_eval.cpython-37.pyc differ
diff --git a/lib/datasets/coco.py b/lib/datasets/coco.py
new file mode 100644
index 0000000..fba89f0
--- /dev/null
+++ b/lib/datasets/coco.py
@@ -0,0 +1,316 @@
+# --------------------------------------------------------
+# Fast/er R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick and Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from datasets.imdb import imdb
+import datasets.ds_utils as ds_utils
+from model.config import cfg
+import os.path as osp
+import sys
+import os
+import numpy as np
+import scipy.sparse
+import scipy.io as sio
+import pickle
+import json
+import uuid
+# COCO API
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from pycocotools import mask as COCOmask
+
+class coco(imdb):
+ def __init__(self, image_set, year):
+ imdb.__init__(self, 'coco_' + year + '_' + image_set)
+ # COCO specific config options
+ self.config = {'use_salt': True,
+ 'cleanup': True}
+ # name, paths
+ self._year = year
+ self._image_set = image_set
+ self._data_path = osp.join(cfg.DATA_DIR, 'coco')
+ # load COCO API, classes, class <-> id mappings
+ self._COCO = COCO(self._get_ann_file())
+ cats = self._COCO.loadCats(self._COCO.getCatIds())
+ self._classes = tuple(['__background__'] + [c['name'] for c in cats])
+ self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
+ self._class_to_coco_cat_id = dict(list(zip([c['name'] for c in cats],
+ self._COCO.getCatIds())))
+ self._image_index = self._load_image_set_index()
+ # Default to roidb handler
+ self.set_proposal_method('gt')
+ self.competition_mode(False)
+
+ # Some image sets are "views" (i.e. subsets) into others.
+ # For example, minival2014 is a random 5000 image subset of val2014.
+ # This mapping tells us where the view's images and proposals come from.
+ self._view_map = {
+ 'minival2014': 'val2014', # 5k val2014 subset
+ 'valminusminival2014': 'val2014', # val2014 \setminus minival2014
+ 'test-dev2015': 'test2015',
+ }
+ coco_name = image_set + year # e.g., "val2014"
+ self._data_name = (self._view_map[coco_name]
+ if coco_name in self._view_map
+ else coco_name)
+ # Dataset splits that have ground-truth annotations (test splits
+ # do not have gt annotations)
+ self._gt_splits = ('train', 'val', 'minival')
+
+ def _get_ann_file(self):
+ prefix = 'instances' if self._image_set.find('test') == -1 \
+ else 'image_info'
+ return osp.join(self._data_path, 'annotations',
+ prefix + '_' + self._image_set + self._year + '.json')
+
+ def _load_image_set_index(self):
+ """
+ Load image ids.
+ """
+ image_ids = self._COCO.getImgIds()
+ return image_ids
+
+ def _get_widths(self):
+ anns = self._COCO.loadImgs(self._image_index)
+ widths = [ann['width'] for ann in anns]
+ return widths
+
+ def image_path_at(self, i):
+ """
+ Return the absolute path to image i in the image sequence.
+ """
+ return self.image_path_from_index(self._image_index[i])
+
+ def image_path_from_index(self, index):
+ """
+ Construct an image path from the image's "index" identifier.
+ """
+ # Example image path for index=119993:
+ # images/train2014/COCO_train2014_000000119993.jpg
+ file_name = ('COCO_' + self._data_name + '_' +
+ str(index).zfill(12) + '.jpg')
+ image_path = osp.join(self._data_path, 'images',
+ self._data_name, file_name)
+ assert osp.exists(image_path), \
+ 'Path does not exist: {}'.format(image_path)
+ return image_path
+
+ def gt_roidb(self):
+ """
+ Return the database of ground-truth regions of interest.
+ This function loads/saves from/to a cache file to speed up future calls.
+ """
+ cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
+ if osp.exists(cache_file):
+ with open(cache_file, 'rb') as fid:
+ roidb = pickle.load(fid)
+ print('{} gt roidb loaded from {}'.format(self.name, cache_file))
+ return roidb
+
+ gt_roidb = [self._load_coco_annotation(index)
+ for index in self._image_index]
+
+ with open(cache_file, 'wb') as fid:
+ pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
+ print('wrote gt roidb to {}'.format(cache_file))
+ return gt_roidb
+
+ def _load_coco_annotation(self, index):
+ """
+ Loads COCO bounding-box instance annotations. Crowd instances are
+ handled by marking their overlaps (with all categories) to -1. This
+ overlap value means that crowd "instances" are excluded from training.
+ """
+ im_ann = self._COCO.loadImgs(index)[0]
+ width = im_ann['width']
+ height = im_ann['height']
+
+ annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
+ objs = self._COCO.loadAnns(annIds)
+ # Sanitize bboxes -- some are invalid
+ valid_objs = []
+ for obj in objs:
+ x1 = np.max((0, obj['bbox'][0]))
+ y1 = np.max((0, obj['bbox'][1]))
+ x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
+ y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
+ if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
+ obj['clean_bbox'] = [x1, y1, x2, y2]
+ valid_objs.append(obj)
+ objs = valid_objs
+ num_objs = len(objs)
+
+ boxes = np.zeros((num_objs, 4), dtype=np.uint16)
+ gt_classes = np.zeros((num_objs), dtype=np.int32)
+ overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
+ seg_areas = np.zeros((num_objs), dtype=np.float32)
+
+ # Lookup table to map from COCO category ids to our internal class
+ # indices
+ coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
+ self._class_to_ind[cls])
+ for cls in self._classes[1:]])
+
+ for ix, obj in enumerate(objs):
+ cls = coco_cat_id_to_class_ind[obj['category_id']]
+ boxes[ix, :] = obj['clean_bbox']
+ gt_classes[ix] = cls
+ seg_areas[ix] = obj['area']
+ if obj['iscrowd']:
+ # Set overlap to -1 for all classes for crowd objects
+ # so they will be excluded during training
+ overlaps[ix, :] = -1.0
+ else:
+ overlaps[ix, cls] = 1.0
+
+ ds_utils.validate_boxes(boxes, width=width, height=height)
+ overlaps = scipy.sparse.csr_matrix(overlaps)
+ return {'width': width,
+ 'height': height,
+ 'boxes': boxes,
+ 'gt_classes': gt_classes,
+ 'gt_overlaps': overlaps,
+ 'flipped': False,
+ 'seg_areas': seg_areas}
+
+ def _get_widths(self):
+ return [r['width'] for r in self.roidb]
+
+ def append_flipped_images(self):
+ num_images = self.num_images
+ widths = self._get_widths()
+ for i in range(num_images):
+ boxes = self.roidb[i]['boxes'].copy()
+ oldx1 = boxes[:, 0].copy()
+ oldx2 = boxes[:, 2].copy()
+ boxes[:, 0] = widths[i] - oldx2 - 1
+ boxes[:, 2] = widths[i] - oldx1 - 1
+ assert (boxes[:, 2] >= boxes[:, 0]).all()
+ entry = {'width': widths[i],
+ 'height': self.roidb[i]['height'],
+ 'boxes': boxes,
+ 'gt_classes': self.roidb[i]['gt_classes'],
+ 'gt_overlaps': self.roidb[i]['gt_overlaps'],
+ 'flipped': True,
+ 'seg_areas': self.roidb[i]['seg_areas']}
+
+ self.roidb.append(entry)
+ self._image_index = self._image_index * 2
+
+ def _get_box_file(self, index):
+ # first 14 chars / first 22 chars / all chars + .mat
+ # COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat
+ file_name = ('COCO_' + self._data_name +
+ '_' + str(index).zfill(12) + '.mat')
+ return osp.join(file_name[:14], file_name[:22], file_name)
+
+ def _print_detection_eval_metrics(self, coco_eval):
+ IoU_lo_thresh = 0.5
+ IoU_hi_thresh = 0.95
+
+ def _get_thr_ind(coco_eval, thr):
+ ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
+ (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
+ iou_thr = coco_eval.params.iouThrs[ind]
+ assert np.isclose(iou_thr, thr)
+ return ind
+
+ ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
+ ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
+ # precision has dims (iou, recall, cls, area range, max dets)
+ # area range index 0: all area ranges
+ # max dets index 2: 100 per image
+ precision = \
+ coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
+ ap_default = np.mean(precision[precision > -1])
+ print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
+ '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
+ print('{:.1f}'.format(100 * ap_default))
+ for cls_ind, cls in enumerate(self.classes):
+ if cls == '__background__':
+ continue
+ # minus 1 because of __background__
+ precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
+ ap = np.mean(precision[precision > -1])
+ print('{:.1f}'.format(100 * ap))
+
+ print('~~~~ Summary metrics ~~~~')
+ coco_eval.summarize()
+
+ def _do_detection_eval(self, res_file, output_dir):
+ ann_type = 'bbox'
+ coco_dt = self._COCO.loadRes(res_file)
+ coco_eval = COCOeval(self._COCO, coco_dt)
+ coco_eval.params.useSegm = (ann_type == 'segm')
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ self._print_detection_eval_metrics(coco_eval)
+ eval_file = osp.join(output_dir, 'detection_results.pkl')
+ with open(eval_file, 'wb') as fid:
+ pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
+ print('Wrote COCO eval results to: {}'.format(eval_file))
+
+ def _coco_results_one_category(self, boxes, cat_id):
+ results = []
+ for im_ind, index in enumerate(self.image_index):
+ dets = boxes[im_ind].astype(np.float)
+ if dets == []:
+ continue
+ scores = dets[:, -1]
+ xs = dets[:, 0]
+ ys = dets[:, 1]
+ ws = dets[:, 2] - xs + 1
+ hs = dets[:, 3] - ys + 1
+ results.extend(
+ [{'image_id': index,
+ 'category_id': cat_id,
+ 'bbox': [xs[k], ys[k], ws[k], hs[k]],
+ 'score': scores[k]} for k in range(dets.shape[0])])
+ return results
+
+ def _write_coco_results_file(self, all_boxes, res_file):
+ # [{"image_id": 42,
+ # "category_id": 18,
+ # "bbox": [258.15,41.29,348.26,243.78],
+ # "score": 0.236}, ...]
+ results = []
+ for cls_ind, cls in enumerate(self.classes):
+ if cls == '__background__':
+ continue
+ print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
+ self.num_classes - 1))
+ coco_cat_id = self._class_to_coco_cat_id[cls]
+ results.extend(self._coco_results_one_category(all_boxes[cls_ind],
+ coco_cat_id))
+ print('Writing results json to {}'.format(res_file))
+ with open(res_file, 'w') as fid:
+ json.dump(results, fid)
+
+ def evaluate_detections(self, all_boxes, output_dir):
+ res_file = osp.join(output_dir, ('detections_' +
+ self._image_set +
+ self._year +
+ '_results'))
+ if self.config['use_salt']:
+ res_file += '_{}'.format(str(uuid.uuid4()))
+ res_file += '.json'
+ self._write_coco_results_file(all_boxes, res_file)
+ # Only do evaluation on non-test sets
+ if self._image_set.find('test') == -1:
+ self._do_detection_eval(res_file, output_dir)
+ # Optionally cleanup results json file
+ if self.config['cleanup']:
+ os.remove(res_file)
+
+ def competition_mode(self, on):
+ if on:
+ self.config['use_salt'] = False
+ self.config['cleanup'] = False
+ else:
+ self.config['use_salt'] = True
+ self.config['cleanup'] = True
diff --git a/lib/datasets/ds_utils.py b/lib/datasets/ds_utils.py
new file mode 100644
index 0000000..fd5ca4b
--- /dev/null
+++ b/lib/datasets/ds_utils.py
@@ -0,0 +1,49 @@
+# --------------------------------------------------------
+# Fast/er R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+
+def unique_boxes(boxes, scale=1.0):
+ """Return indices of unique boxes."""
+ v = np.array([1, 1e3, 1e6, 1e9])
+ hashes = np.round(boxes * scale).dot(v)
+ _, index = np.unique(hashes, return_index=True)
+ return np.sort(index)
+
+
+def xywh_to_xyxy(boxes):
+ """Convert [x y w h] box format to [x1 y1 x2 y2] format."""
+ return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
+
+
+def xyxy_to_xywh(boxes):
+ """Convert [x1 y1 x2 y2] box format to [x y w h] format."""
+ return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
+
+
+def validate_boxes(boxes, width=0, height=0):
+ """Check that a set of boxes are valid."""
+ x1 = boxes[:, 0]
+ y1 = boxes[:, 1]
+ x2 = boxes[:, 2]
+ y2 = boxes[:, 3]
+ assert (x1 >= 0).all()
+ assert (y1 >= 0).all()
+ assert (x2 >= x1).all()
+ assert (y2 >= y1).all()
+ assert (x2 < width).all()
+ assert (y2 < height).all()
+
+
+def filter_small_boxes(boxes, min_size):
+ w = boxes[:, 2] - boxes[:, 0]
+ h = boxes[:, 3] - boxes[:, 1]
+ keep = np.where((w >= min_size) & (h > min_size))[0]
+ return keep
diff --git a/lib/datasets/factory.py b/lib/datasets/factory.py
new file mode 100644
index 0000000..c1f823b
--- /dev/null
+++ b/lib/datasets/factory.py
@@ -0,0 +1,57 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+
+"""Factory method for easily getting imdbs by name."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+__sets = {}
+from datasets.pascal_voc import pascal_voc
+
+#from datasets.coco import coco
+
+#from datasets.coco import coco
+
+import numpy as np
+
+# Set up voc_<year>_<split> entries, e.g. "voc_2007_trainval"
+for year in ['2007', '2012']:
+ for split in ['train', 'val', 'trainval', 'test']:
+ name = 'voc_{}_{}'.format(year, split)
+ __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
+
+for year in ['2007', '2012']:
+ for split in ['train', 'val', 'trainval', 'test']:
+ name = 'voc_{}_{}_diff'.format(year, split)
+ __sets[name] = (lambda split=split, year=year: pascal_voc(split, year, use_diff=True))
+
+
+# Set up coco_2014_<split> entries, e.g. "coco_2014_minival"
+for year in ['2014']:
+ for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
+ name = 'coco_{}_{}'.format(year, split)
+ __sets[name] = (lambda split=split, year=year: coco(split, year))
+
+# Set up coco_2015_<split> entries, e.g. "coco_2015_test-dev"
+for year in ['2015']:
+ for split in ['test', 'test-dev']:
+ name = 'coco_{}_{}'.format(year, split)
+ __sets[name] = (lambda split=split, year=year: coco(split, year))
+
+
+
+def get_imdb(name):
+ """Get an imdb (image database) by name."""
+ if name not in __sets:
+ raise KeyError('Unknown dataset: {}'.format(name))
+ return __sets[name]()
+
+
+def list_imdbs():
+ """List all registered imdbs."""
+ return list(__sets.keys())
diff --git a/lib/datasets/imdb.py b/lib/datasets/imdb.py
new file mode 100644
index 0000000..03cea0a
--- /dev/null
+++ b/lib/datasets/imdb.py
@@ -0,0 +1,264 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick and Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import os.path as osp
+import PIL
+from utils.bbox import bbox_overlaps
+import numpy as np
+import scipy.sparse
+from model.config import cfg
+
+
+class imdb(object):
+ """Image database."""
+
+ def __init__(self, name, classes=None):
+ self._name = name
+ self._num_classes = 0
+ if not classes:
+ self._classes = []
+ else:
+ self._classes = classes
+ self._image_index = []
+ self._obj_proposer = 'selective_search'
+ self._roidb = None
+ self._roidb_handler = self.default_roidb
+ # Use this dict for storing dataset specific config options
+ self.config = {}
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def num_classes(self):
+ return len(self._classes)
+
+ @property
+ def classes(self):
+ return self._classes
+
+ @property
+ def image_index(self):
+ return self._image_index
+
+ @property
+ def roidb_handler(self):
+ return self._roidb_handler
+
+ @roidb_handler.setter
+ def roidb_handler(self, val):
+ self._roidb_handler = val
+
+ def set_proposal_method(self, method):
+ method = eval('self.' + method + '_roidb')
+ self.roidb_handler = method
+
+ @property
+ def roidb(self):
+ # A roidb is a list of dictionaries, each with the following keys:
+ # boxes
+ # gt_overlaps
+ # gt_classes
+ # flipped
+ if self._roidb is not None:
+ return self._roidb
+ self._roidb = self.roidb_handler()
+ return self._roidb
+
+ @property
+ def cache_path(self):
+ cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
+ if not os.path.exists(cache_path):
+ os.makedirs(cache_path)
+ return cache_path
+
+ @property
+ def num_images(self):
+ return len(self.image_index)
+
+ def image_path_at(self, i):
+ raise NotImplementedError
+
+ def default_roidb(self):
+ raise NotImplementedError
+
+ def evaluate_detections(self, all_boxes, output_dir=None):
+ """
+ all_boxes is a list of length number-of-classes.
+ Each list element is a list of length number-of-images.
+ Each of those list elements is either an empty list []
+ or a numpy array of detection.
+
+ all_boxes[class][image] = [] or np.array of shape #dets x 5
+ """
+ raise NotImplementedError
+
+ def _get_widths(self):
+ return [PIL.Image.open(self.image_path_at(i)).size[0]
+ for i in range(self.num_images)]
+
+ def append_flipped_images(self):
+ num_images = self.num_images
+ widths = self._get_widths()
+ for i in range(num_images):
+ boxes = self.roidb[i]['boxes'].copy()
+ oldx1 = boxes[:, 0].copy()
+ oldx2 = boxes[:, 2].copy()
+ boxes[:, 0] = widths[i] - oldx2 - 1
+ boxes[:, 2] = widths[i] - oldx1 - 1
+ assert (boxes[:, 2] >= boxes[:, 0]).all()
+ #print(self.roidb[i].keys())
+ entry = {'boxes': boxes,
+ 'gt_overlaps': self.roidb[i]['gt_overlaps'],
+ 'gt_classes': self.roidb[i]['gt_classes'],
+ 'flipped': True,
+ 'image_level_labels':self.roidb[i]['image_level_labels']}
+ self.roidb.append(entry)
+ self._image_index = self._image_index * 2
+
+ def evaluate_recall(self, candidate_boxes=None, thresholds=None,
+ area='all', limit=None):
+ """Evaluate detection proposal recall metrics.
+
+ Returns:
+ results: dictionary of results with keys
+ 'ar': average recall
+ 'recalls': vector recalls at each IoU overlap threshold
+ 'thresholds': vector of IoU overlap thresholds
+ 'gt_overlaps': vector of all ground-truth overlaps
+ """
+ # Record max overlap value for each gt box
+ # Return vector of overlap values
+ areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,
+ '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}
+ area_ranges = [[0 ** 2, 1e5 ** 2], # all
+ [0 ** 2, 32 ** 2], # small
+ [32 ** 2, 96 ** 2], # medium
+ [96 ** 2, 1e5 ** 2], # large
+ [96 ** 2, 128 ** 2], # 96-128
+ [128 ** 2, 256 ** 2], # 128-256
+ [256 ** 2, 512 ** 2], # 256-512
+ [512 ** 2, 1e5 ** 2], # 512-inf
+ ]
+ assert area in areas, 'unknown area range: {}'.format(area)
+ area_range = area_ranges[areas[area]]
+ gt_overlaps = np.zeros(0)
+ num_pos = 0
+ for i in range(self.num_images):
+ # Checking for max_overlaps == 1 avoids including crowd annotations
+      # (...pretty hacky :/)
+ max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)
+ gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &
+ (max_gt_overlaps == 1))[0]
+ gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
+ gt_areas = self.roidb[i]['seg_areas'][gt_inds]
+ valid_gt_inds = np.where((gt_areas >= area_range[0]) &
+ (gt_areas <= area_range[1]))[0]
+ gt_boxes = gt_boxes[valid_gt_inds, :]
+ num_pos += len(valid_gt_inds)
+
+ if candidate_boxes is None:
+ # If candidate_boxes is not supplied, the default is to use the
+ # non-ground-truth boxes from this roidb
+ non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
+ boxes = self.roidb[i]['boxes'][non_gt_inds, :]
+ else:
+ boxes = candidate_boxes[i]
+ if boxes.shape[0] == 0:
+ continue
+ if limit is not None and boxes.shape[0] > limit:
+ boxes = boxes[:limit, :]
+
+ overlaps = bbox_overlaps(boxes.astype(np.float),
+ gt_boxes.astype(np.float))
+
+ _gt_overlaps = np.zeros((gt_boxes.shape[0]))
+ for j in range(gt_boxes.shape[0]):
+ # find which proposal box maximally covers each gt box
+ argmax_overlaps = overlaps.argmax(axis=0)
+ # and get the iou amount of coverage for each gt box
+ max_overlaps = overlaps.max(axis=0)
+ # find which gt box is 'best' covered (i.e. 'best' = most iou)
+ gt_ind = max_overlaps.argmax()
+ gt_ovr = max_overlaps.max()
+ assert (gt_ovr >= 0)
+ # find the proposal box that covers the best covered gt box
+ box_ind = argmax_overlaps[gt_ind]
+ # record the iou coverage of this gt box
+ _gt_overlaps[j] = overlaps[box_ind, gt_ind]
+ assert (_gt_overlaps[j] == gt_ovr)
+ # mark the proposal box and the gt box as used
+ overlaps[box_ind, :] = -1
+ overlaps[:, gt_ind] = -1
+ # append recorded iou coverage level
+ gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
+
+ gt_overlaps = np.sort(gt_overlaps)
+ if thresholds is None:
+ step = 0.05
+ thresholds = np.arange(0.5, 0.95 + 1e-5, step)
+ recalls = np.zeros_like(thresholds)
+ # compute recall for each iou threshold
+ for i, t in enumerate(thresholds):
+ recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
+ # ar = 2 * np.trapz(recalls, thresholds)
+ ar = recalls.mean()
+ return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
+ 'gt_overlaps': gt_overlaps}
+
+ def create_roidb_from_box_list(self, box_list, gt_roidb):
+ assert len(box_list) == self.num_images, \
+ 'Number of boxes must match number of ground-truth images'
+ roidb = []
+ for i in range(self.num_images):
+ boxes = box_list[i]
+ num_boxes = boxes.shape[0]
+ overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
+
+ if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
+ gt_boxes = gt_roidb[i]['boxes']
+ gt_classes = gt_roidb[i]['gt_classes']
+ gt_overlaps = bbox_overlaps(boxes.astype(np.float),
+ gt_boxes.astype(np.float))
+ argmaxes = gt_overlaps.argmax(axis=1)
+ maxes = gt_overlaps.max(axis=1)
+ I = np.where(maxes > 0)[0]
+ overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
+
+ overlaps = scipy.sparse.csr_matrix(overlaps)
+ roidb.append({
+ 'boxes': boxes,
+ 'gt_classes': -np.ones((num_boxes,), dtype=np.int32),
+ 'gt_overlaps': overlaps,
+ 'flipped': False,
+ 'seg_areas': np.zeros((num_boxes,), dtype=np.float32),
+ 'image_level_labels':np.zeros((1, self.num_classes), dtype=np.int32)
+ })
+ return roidb
+
+ @staticmethod
+ def merge_roidbs(a, b):
+ assert len(a) == len(b)
+ for i in range(len(a)):
+ a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
+ a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
+ b[i]['gt_classes']))
+ a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
+ b[i]['gt_overlaps']])
+ a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
+ b[i]['seg_areas']))
+ a[i]['image_level_labels'] = a[i]['image_level_labels']
+ return a
+
+ def competition_mode(self, on):
+ """Turn competition mode on or off."""
+ pass
diff --git a/lib/datasets/pascal_voc.py b/lib/datasets/pascal_voc.py
new file mode 100644
index 0000000..6bed8f5
--- /dev/null
+++ b/lib/datasets/pascal_voc.py
@@ -0,0 +1,417 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick and Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+from datasets.imdb import imdb
+import datasets.ds_utils as ds_utils
+import xml.etree.ElementTree as ET
+import numpy as np
+import scipy.sparse
+import scipy.io as sio
+import pickle
+import subprocess
+import uuid
+from .voc_eval import voc_eval
+from model.config import cfg
+
+
+class pascal_voc(imdb):
+ def __init__(self, image_set, year, use_diff=False):
+ name = 'voc_' + year + '_' + image_set
+ if use_diff:
+ name += '_diff'
+ imdb.__init__(self, name)
+ self._year = year
+ self._image_set = image_set
+ self._devkit_path = self._get_default_path()
+ self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
+ self._classes = ('aeroplane', 'bicycle', 'bird', 'boat',
+ 'bottle', 'bus', 'car', 'cat', 'chair',
+ 'cow', 'diningtable', 'dog', 'horse',
+ 'motorbike', 'person', 'pottedplant',
+ 'sheep', 'sofa', 'train', 'tvmonitor')
+ self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
+ self._image_ext = '.jpg'
+ self._image_index = self._load_image_set_index()
+ # Default to roidb handler
+ self._roidb_handler = self.selective_search_roidb
+ #self._roidb_handler = self.gt_roidb
+ self._salt = str(uuid.uuid4())
+ self._comp_id = 'comp4'
+
+ # PASCAL specific config options
+ self.config = {'cleanup': True,
+ 'use_salt': True,
+ 'use_diff': use_diff,
+ 'matlab_eval': False,
+ 'rpn_file': None,
+ 'min_size': 20}
+
+ assert os.path.exists(self._devkit_path), \
+ 'VOCdevkit path does not exist: {}'.format(self._devkit_path)
+ assert os.path.exists(self._data_path), \
+ 'Path does not exist: {}'.format(self._data_path)
+
+ def image_path_at(self, i):
+ """
+ Return the absolute path to image i in the image sequence.
+ """
+ return self.image_path_from_index(self._image_index[i])
+
+ def image_path_from_index(self, index):
+ """
+ Construct an image path from the image's "index" identifier.
+ """
+ image_path = os.path.join(self._data_path, 'JPEGImages',
+ index + self._image_ext)
+ assert os.path.exists(image_path), \
+ 'Path does not exist: {}'.format(image_path)
+ return image_path
+
+ def _load_image_set_index(self):
+ """
+ Load the indexes listed in this dataset's image set file.
+ """
+ # Example path to image set file:
+ # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
+ image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
+ self._image_set + '.txt')
+ assert os.path.exists(image_set_file), \
+ 'Path does not exist: {}'.format(image_set_file)
+ with open(image_set_file) as f:
+ image_index = [x.strip() for x in f.readlines()]
+ return image_index
+
+ def _get_default_path(self):
+ """
+ Return the default path where PASCAL VOC is expected to be installed.
+ """
+ return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
+
+ def gt_roidb(self):
+ """
+ Return the database of ground-truth regions of interest.
+
+ This function loads/saves from/to a cache file to speed up future calls.
+ """
+ cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
+ if os.path.exists(cache_file):
+ with open(cache_file, 'rb') as fid:
+ try:
+ roidb = pickle.load(fid)
+ except:
+ roidb = pickle.load(fid, encoding='bytes')
+ print('{} gt roidb loaded from {}'.format(self.name, cache_file))
+ return roidb
+
+ #gt_roidb = [self._load_pascal_annotation_retrain(index)
+ gt_roidb = [self._load_pascal_annotation(index)
+ for index in self.image_index]
+ with open(cache_file, 'wb') as fid:
+ pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
+ print('wrote gt roidb to {}'.format(cache_file))
+
+ return gt_roidb
+
+ def selective_search_roidb(self):
+ """
+ Return the database of selective search regions of interest.
+ Ground-truth ROIs are also included.
+
+ This function loads/saves from/to a cache file to speed up future calls.
+ """
+ cache_file = os.path.join(self.cache_path,
+ self.name + '_selective_search_roidb.pkl')
+
+ if os.path.exists(cache_file):
+ with open(cache_file, 'rb') as fid:
+ roidb = pickle.load(fid)
+ print('{} ss roidb loaded from {}'.format(self.name, cache_file))
+
+ return roidb
+
+ if int(self._year) == 2007 or self._image_set != 'test':
+ gt_roidb = self.gt_roidb()
+ ss_roidb = self._load_selective_search_roidb(gt_roidb)
+ roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
+ else:
+ roidb = self._load_selective_search_roidb(None)
+ with open(cache_file, 'wb') as fid:
+ pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
+ print('wrote ss roidb to {}'.format(cache_file))
+
+ return roidb
+
+ def rpn_roidb(self):
+ if int(self._year) == 2007 or self._image_set != 'test':
+ gt_roidb = self.gt_roidb()
+ rpn_roidb = self._load_rpn_roidb(gt_roidb)
+ roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
+ else:
+ roidb = self._load_rpn_roidb(None)
+
+ return roidb
+
+ def _load_rpn_roidb(self, gt_roidb):
+ filename = self.config['rpn_file']
+ print('loading {}'.format(filename))
+ assert os.path.exists(filename), \
+ 'rpn data not found at: {}'.format(filename)
+ with open(filename, 'rb') as f:
+ box_list = pickle.load(f)
+ return self.create_roidb_from_box_list(box_list, gt_roidb)
+
+ def _load_selective_search_roidb(self, gt_roidb):
+ filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
+ 'selective_search_data',
+ self.name + '.mat'))
+ assert os.path.exists(filename), \
+ 'Selective search data not found at: {}'.format(filename)
+ raw_data = sio.loadmat(filename)['boxes'].ravel()
+
+ box_list = []
+ for i in range(raw_data.shape[0]):
+ boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
+ keep = ds_utils.unique_boxes(boxes)
+ boxes = boxes[keep, :]
+ keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
+ boxes = boxes[keep, :]
+ box_list.append(boxes)
+
+ return self.create_roidb_from_box_list(box_list, gt_roidb)
+
+
+ def _load_pascal_annotation(self, index):
+ """
+ Load image and bounding boxes info from XML file in the PASCAL VOC
+ format.
+ """
+ filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
+ tree = ET.parse(filename)
+ objs = tree.findall('object')
+ if not self.config['use_diff']:
+ # Exclude the samples labeled as difficult
+ non_diff_objs = [
+ obj for obj in objs if int(obj.find('difficult').text) == 0]
+ # if len(non_diff_objs) != len(objs):
+ # print 'Removed {} difficult objects'.format(
+ # len(objs) - len(non_diff_objs))
+ objs = non_diff_objs
+ num_objs = len(objs)
+
+ boxes = np.zeros((num_objs, 4), dtype=np.uint16)
+ gt_classes = np.zeros((num_objs), dtype=np.int32)
+ overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
+ # "Seg" area for pascal is just the box area
+ seg_areas = np.zeros((num_objs), dtype=np.float32)
+
+ image_level_labels = -np.ones((1, self.num_classes), dtype=np.int32)
+
+ # Load object bounding boxes into a data frame.
+ for ix, obj in enumerate(objs):
+ bbox = obj.find('bndbox')
+ # Make pixel indexes 0-based
+ x1 = float(bbox.find('xmin').text) - 1
+ y1 = float(bbox.find('ymin').text) - 1
+ x2 = float(bbox.find('xmax').text) - 1
+ y2 = float(bbox.find('ymax').text) - 1
+ cls = self._class_to_ind[obj.find('name').text.lower().strip()]
+ image_level_labels[0][cls] = 1
+ boxes[ix, :] = [x1, y1, x2, y2]
+ gt_classes[ix] = cls
+ overlaps[ix, cls] = 1.0
+ seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
+
+ overlaps = scipy.sparse.csr_matrix(overlaps)
+
+ return {'boxes': boxes,
+ 'gt_classes': gt_classes,
+ 'gt_overlaps': overlaps,
+ 'flipped': False,
+ 'seg_areas': seg_areas,
+ 'image_level_labels':image_level_labels}
+
+ def _load_pascal_annotation_retrain(self, index):
+ """
+ Load image and bounding boxes info from XML file in the PASCAL VOC
+        format.
+ """
+        # Pseudo-GT variants available: TopThresh0.7, Top1, Top10, Detections (folders below)
+ #gt_folder = '/root/Cloud/G5/MELM-VGG16/output/VOC2007/VGG16/PsuedoGT/Top1/'
+ gt_folder = '/root/Cloud/G5/MELM-VGG16/output/VOC2007/VGG16/PsuedoGT/Detections/'
+ #gt_folder = '/root/Cloud/G5/MELM-VGG16/output/VOC2007/VGG16/PsuedoGT/Top10/'
+ print('Loading ', gt_folder, ': ', index)
+ filename = os.path.join(gt_folder, index + '.txt')
+ objs = np.loadtxt(filename)
+ if objs.ndim == 1:
+ num_objs = 1
+ objs = objs.reshape(1,-1)
+ else:
+ num_objs = objs.shape[0]
+
+ boxes = np.zeros((num_objs, 4), dtype=np.uint16)
+ gt_classes = np.zeros((num_objs), dtype=np.int32)
+ overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
+ seg_areas = np.zeros((num_objs), dtype=np.float32)
+
+ # Load object bounding boxes into a data frame.
+ for i in range(num_objs):
+ # Make pixel indexes 0-based
+ x1 = objs[i][1]
+ y1 = objs[i][2]
+ x2 = objs[i][3]
+ y2 = objs[i][4]
+ boxes[i, :] = [x1, y1, x2, y2]
+ gt_classes[i] = int(objs[i][0])
+ overlaps[i, int(objs[i][0])] = 1.0
+ seg_areas[i] = (x2 - x1 + 1) * (y2 - y1 + 1)
+
+ overlaps = scipy.sparse.csr_matrix(overlaps)
+
+ return {'boxes' : boxes,
+ 'gt_classes': gt_classes,
+ 'gt_overlaps' : overlaps,
+ 'flipped' : False,
+ 'seg_areas': seg_areas}
+
+
+ def _get_comp_id(self):
+ comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
+ else self._comp_id)
+ return comp_id
+
+ def _get_voc_results_file_template(self):
+ # VOCdevkit/results/VOC2007/Main/_det_test_aeroplane.txt
+ filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
+ path = os.path.join(
+ self._devkit_path,
+ 'results',
+ 'VOC' + self._year,
+ 'Main',
+ filename)
+ return path
+
+ def _write_voc_results_file(self, all_boxes):
+ for cls_ind, cls in enumerate(self.classes):
+ if cls == '__background__':
+ continue
+ print('Writing {} VOC results file'.format(cls))
+ filename = self._get_voc_results_file_template().format(cls)
+
+ new_filename = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main') # ------------------------
+
+ if not os.path.exists(new_filename):
+ os.makedirs(new_filename)
+ with open(filename, 'wt') as f:
+ for im_ind, index in enumerate(self.image_index):
+ dets = all_boxes[cls_ind][im_ind]
+                    if len(dets) == 0:
+
+ # dets = all_boxes[cls_ind][im_ind].numpy()
+ # print(dets.shape)
+ # if dets.shape[0]:
+
+ continue
+ # the VOCdevkit expects 1-based indices
+ for k in range(dets.shape[0]):
+ f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
+ format(index, dets[k, -1],
+ dets[k, 0] + 1, dets[k, 1] + 1,
+ dets[k, 2] + 1, dets[k, 3] + 1))
+
+ def _do_python_eval(self, output_dir='output'):
+ annopath = os.path.join(
+ self._devkit_path,
+ 'VOC' + self._year,
+ 'Annotations',
+ '{:s}.xml')
+ imagesetfile = os.path.join(
+ self._devkit_path,
+ 'VOC' + self._year,
+ 'ImageSets',
+ 'Main',
+ self._image_set + '.txt')
+ cachedir = os.path.join(self._devkit_path, 'annotations_cache')
+ aps = []
+ # The PASCAL VOC metric changed in 2010
+ use_07_metric = True if int(self._year) < 2010 else False
+ print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
+ if not os.path.isdir(output_dir):
+ os.mkdir(output_dir)
+ for i, cls in enumerate(self._classes):
+ if cls == '__background__':
+ continue
+ filename = self._get_voc_results_file_template().format(cls)
+ rec, prec, ap = voc_eval(
+ filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
+ use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
+ aps += [ap]
+ print(('AP for {} = {:.4f}'.format(cls, ap)))
+ with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
+ pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
+ print(('Mean AP = {:.4f}'.format(np.mean(aps))))
+ print('~~~~~~~~')
+ print('Results:')
+ for ap in aps:
+ print(('{:.3f}'.format(ap)))
+ print(('{:.3f}'.format(np.mean(aps))))
+ print('~~~~~~~~')
+ print('')
+ print('--------------------------------------------------------------')
+ print('Results computed with the **unofficial** Python eval code.')
+ print('Results should be very close to the official MATLAB eval code.')
+ print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
+ print('-- Thanks, The Management')
+ print('--------------------------------------------------------------')
+
+ def _do_matlab_eval(self, output_dir='output'):
+ print('-----------------------------------------------------')
+ print('Computing results with the official MATLAB eval code.')
+ print('-----------------------------------------------------')
+ path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
+ 'VOCdevkit-matlab-wrapper')
+ cmd = 'cd {} && '.format(path)
+ cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
+ cmd += '-r "dbstop if error; '
+ cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
+ .format(self._devkit_path, self._get_comp_id(),
+ self._image_set, output_dir)
+ print(('Running:\n{}'.format(cmd)))
+ status = subprocess.call(cmd, shell=True)
+
+ def evaluate_detections(self, all_boxes, output_dir):
+ self._write_voc_results_file(all_boxes)
+ self._do_python_eval(output_dir)
+ if self.config['matlab_eval']:
+ self._do_matlab_eval(output_dir)
+ if self.config['cleanup']:
+ for cls in self._classes:
+ if cls == '__background__':
+ continue
+ filename = self._get_voc_results_file_template().format(cls)
+ os.remove(filename)
+
+ def competition_mode(self, on):
+ if on:
+ self.config['use_salt'] = False
+ self.config['cleanup'] = False
+ else:
+ self.config['use_salt'] = True
+ self.config['cleanup'] = True
+
+
+if __name__ == '__main__':
+ from datasets.pascal_voc import pascal_voc
+
+ d = pascal_voc('trainval', '2007')
+ res = d.roidb
+ from IPython import embed;
+
+ embed()
diff --git a/lib/datasets/tools/mcg_munge.py b/lib/datasets/tools/mcg_munge.py
new file mode 100644
index 0000000..1392aa3
--- /dev/null
+++ b/lib/datasets/tools/mcg_munge.py
@@ -0,0 +1,38 @@
+import os
+import sys
+
+"""Hacky tool to convert file system layout of MCG boxes downloaded from
+http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/mcg/
+so that it's consistent with those computed by Jan Hosang (see:
+http://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal-
+ computing/research/object-recognition-and-scene-understanding/how-
+ good-are-detection-proposals-really/)
+
+NB: Boxes from the MCG website are in (y1, x1, y2, x2) order.
+Boxes from Hosang et al. are in (x1, y1, x2, y2) order.
+"""
+
+def munge(src_dir):
+ # stored as: ./MCG-COCO-val2014-boxes/COCO_val2014_000000193401.mat
+ # want: ./MCG/mat/COCO_val2014_0/COCO_val2014_000000141/COCO_val2014_000000141334.mat
+
+ files = os.listdir(src_dir)
+ for fn in files:
+ base, ext = os.path.splitext(fn)
+ # first 14 chars / first 22 chars / all chars + .mat
+ # COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat
+ first = base[:14]
+ second = base[:22]
+ dst_dir = os.path.join('MCG', 'mat', first, second)
+ if not os.path.exists(dst_dir):
+ os.makedirs(dst_dir)
+ src = os.path.join(src_dir, fn)
+ dst = os.path.join(dst_dir, fn)
+        print('MV: {} -> {}'.format(src, dst))
+ os.rename(src, dst)
+
+if __name__ == '__main__':
+ # src_dir should look something like:
+ # src_dir = 'MCG-COCO-val2014-boxes'
+ src_dir = sys.argv[1]
+ munge(src_dir)
diff --git a/lib/datasets/voc_eval.py b/lib/datasets/voc_eval.py
new file mode 100644
index 0000000..17a9ee8
--- /dev/null
+++ b/lib/datasets/voc_eval.py
@@ -0,0 +1,214 @@
+# --------------------------------------------------------
+# Fast/er R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Bharath Hariharan
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import xml.etree.ElementTree as ET
+import os
+import pickle
+import numpy as np
+
+def parse_rec(filename):
+ """ Parse a PASCAL VOC xml file """
+ tree = ET.parse(filename)
+ objects = []
+ for obj in tree.findall('object'):
+ obj_struct = {}
+ obj_struct['name'] = obj.find('name').text
+ obj_struct['pose'] = obj.find('pose').text
+ obj_struct['truncated'] = int(obj.find('truncated').text)
+ obj_struct['difficult'] = int(obj.find('difficult').text)
+ bbox = obj.find('bndbox')
+ obj_struct['bbox'] = [int(bbox.find('xmin').text),
+ int(bbox.find('ymin').text),
+ int(bbox.find('xmax').text),
+ int(bbox.find('ymax').text)]
+ objects.append(obj_struct)
+
+ return objects
+
+
+def voc_ap(rec, prec, use_07_metric=False):
+ """ ap = voc_ap(rec, prec, [use_07_metric])
+ Compute VOC AP given precision and recall.
+ If use_07_metric is true, uses the
+ VOC 07 11 point method (default:False).
+ """
+ if use_07_metric:
+ # 11 point metric
+ ap = 0.
+ for t in np.arange(0., 1.1, 0.1):
+ if np.sum(rec >= t) == 0:
+ p = 0
+ else:
+ p = np.max(prec[rec >= t])
+ ap = ap + p / 11.
+ else:
+ # correct AP calculation
+ # first append sentinel values at the end
+ mrec = np.concatenate(([0.], rec, [1.]))
+ mpre = np.concatenate(([0.], prec, [0.]))
+
+ # compute the precision envelope
+ for i in range(mpre.size - 1, 0, -1):
+ mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+ # to calculate area under PR curve, look for points
+ # where X axis (recall) changes value
+ i = np.where(mrec[1:] != mrec[:-1])[0]
+
+ # and sum (\Delta recall) * prec
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+ return ap
+
+
+def voc_eval(detpath,
+ annopath,
+ imagesetfile,
+ classname,
+ cachedir,
+ ovthresh=0.5,
+ use_07_metric=False,
+ use_diff=False):
+ """rec, prec, ap = voc_eval(detpath,
+ annopath,
+ imagesetfile,
+ classname,
+ [ovthresh],
+ [use_07_metric])
+
+ Top level function that does the PASCAL VOC evaluation.
+
+ detpath: Path to detections
+ detpath.format(classname) should produce the detection results file.
+ annopath: Path to annotations
+ annopath.format(imagename) should be the xml annotations file.
+ imagesetfile: Text file containing the list of images, one image per line.
+ classname: Category name (duh)
+ cachedir: Directory for caching the annotations
+ [ovthresh]: Overlap threshold (default = 0.5)
+ [use_07_metric]: Whether to use VOC07's 11 point AP computation
+ (default False)
+ """
+ # assumes detections are in detpath.format(classname)
+ # assumes annotations are in annopath.format(imagename)
+ # assumes imagesetfile is a text file with each line an image name
+ # cachedir caches the annotations in a pickle file
+
+ # first load gt
+ if not os.path.isdir(cachedir):
+ os.mkdir(cachedir)
+ cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)
+ # read list of images
+ with open(imagesetfile, 'r') as f:
+ lines = f.readlines()
+ imagenames = [x.strip() for x in lines]
+
+ if not os.path.isfile(cachefile):
+ # load annotations
+ recs = {}
+ for i, imagename in enumerate(imagenames):
+ recs[imagename] = parse_rec(annopath.format(imagename))
+ if i % 100 == 0:
+ print('Reading annotation for {:d}/{:d}'.format(
+ i + 1, len(imagenames)))
+ # save
+ print('Saving cached annotations to {:s}'.format(cachefile))
+ with open(cachefile, 'wb') as f:
+ pickle.dump(recs, f)
+ else:
+ # load
+ with open(cachefile, 'rb') as f:
+ try:
+ recs = pickle.load(f)
+ except:
+ recs = pickle.load(f, encoding='bytes')
+
+ # extract gt objects for this class
+ class_recs = {}
+ npos = 0
+ for imagename in imagenames:
+ R = [obj for obj in recs[imagename] if obj['name'] == classname]
+ bbox = np.array([x['bbox'] for x in R])
+ if use_diff:
+            difficult = np.array([False for x in R]).astype(bool)
+ else:
+            difficult = np.array([x['difficult'] for x in R]).astype(bool)
+ det = [False] * len(R)
+ npos = npos + sum(~difficult)
+ class_recs[imagename] = {'bbox': bbox,
+ 'difficult': difficult,
+ 'det': det}
+
+ # read dets
+ detfile = detpath.format(classname)
+ with open(detfile, 'r') as f:
+ lines = f.readlines()
+
+ splitlines = [x.strip().split(' ') for x in lines]
+ image_ids = [x[0] for x in splitlines]
+ confidence = np.array([float(x[1]) for x in splitlines])
+ BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
+
+ nd = len(image_ids)
+ tp = np.zeros(nd)
+ fp = np.zeros(nd)
+
+ if BB.shape[0] > 0:
+ # sort by confidence
+ sorted_ind = np.argsort(-confidence)
+ sorted_scores = np.sort(-confidence)
+ BB = BB[sorted_ind, :]
+ image_ids = [image_ids[x] for x in sorted_ind]
+
+ # go down dets and mark TPs and FPs
+ for d in range(nd):
+ R = class_recs[image_ids[d]]
+ bb = BB[d, :].astype(float)
+ ovmax = -np.inf
+ BBGT = R['bbox'].astype(float)
+
+ if BBGT.size > 0:
+ # compute overlaps
+ # intersection
+ ixmin = np.maximum(BBGT[:, 0], bb[0])
+ iymin = np.maximum(BBGT[:, 1], bb[1])
+ ixmax = np.minimum(BBGT[:, 2], bb[2])
+ iymax = np.minimum(BBGT[:, 3], bb[3])
+ iw = np.maximum(ixmax - ixmin + 1., 0.)
+ ih = np.maximum(iymax - iymin + 1., 0.)
+ inters = iw * ih
+
+ # union
+ uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
+ (BBGT[:, 2] - BBGT[:, 0] + 1.) *
+ (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
+
+ overlaps = inters / uni
+ ovmax = np.max(overlaps)
+ jmax = np.argmax(overlaps)
+
+ if ovmax > ovthresh:
+ if not R['difficult'][jmax]:
+ if not R['det'][jmax]:
+ tp[d] = 1.
+ R['det'][jmax] = 1
+ else:
+ fp[d] = 1.
+ else:
+ fp[d] = 1.
+
+ # compute precision recall
+ fp = np.cumsum(fp)
+ tp = np.cumsum(tp)
+ rec = tp / float(npos)
+ # avoid divide by zero in case the first detection matches a difficult
+ # ground truth
+ prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
+ ap = voc_ap(rec, prec, use_07_metric)
+
+ return rec, prec, ap
diff --git a/lib/faster_rcnn.egg-info/PKG-INFO b/lib/faster_rcnn.egg-info/PKG-INFO
new file mode 100644
index 0000000..c59e971
--- /dev/null
+++ b/lib/faster_rcnn.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
+Metadata-Version: 1.0
+Name: faster-rcnn
+Version: 0.1
+Summary: Weakly Supervised Object Detection
+Home-page: UNKNOWN
+Author: GaoWei
+Author-email: UNKNOWN
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
diff --git a/lib/faster_rcnn.egg-info/SOURCES.txt b/lib/faster_rcnn.egg-info/SOURCES.txt
new file mode 100644
index 0000000..fc53b63
--- /dev/null
+++ b/lib/faster_rcnn.egg-info/SOURCES.txt
@@ -0,0 +1,73 @@
+setup_cuda.py
+datasets/__init__.py
+datasets/coco.py
+datasets/ds_utils.py
+datasets/factory.py
+datasets/imdb.py
+datasets/pascal_voc.py
+datasets/voc_eval.py
+faster_rcnn.egg-info/PKG-INFO
+faster_rcnn.egg-info/SOURCES.txt
+faster_rcnn.egg-info/dependency_links.txt
+faster_rcnn.egg-info/not-zip-safe
+faster_rcnn.egg-info/top_level.txt
+layer_utils/__init__.py
+layer_utils/anchor_target_layer.py
+layer_utils/generate_anchors.py
+layer_utils/proposal_layer.py
+layer_utils/proposal_target_layer.py
+layer_utils/proposal_top_layer.py
+layer_utils/snippets.py
+layer_utils/roi_align/__init__.py
+layer_utils/roi_align/build.py
+layer_utils/roi_align/crop_and_resize.py
+layer_utils/roi_align/roi_align.py
+layer_utils/roi_align/_ext/__init__.py
+layer_utils/roi_align/_ext/crop_and_resize/__init__.py
+layer_utils/roi_pooling/__init__.py
+layer_utils/roi_pooling/build.py
+layer_utils/roi_pooling/_ext/__init__.py
+layer_utils/roi_pooling/_ext/roi_pooling/__init__.py
+layer_utils/roi_pooling/functions/__init__.py
+layer_utils/roi_pooling/functions/roi_pool.py
+layer_utils/roi_pooling/modules/__init__.py
+layer_utils/roi_pooling/modules/roi_pool.py
+model/__init__.py
+model/apmetric.py
+model/bbox_transform.py
+model/config.py
+model/nms_wrapper.py
+model/test.py
+model/train_val.py
+nets/__init__.py
+nets/mobilenet_v1.py
+nets/network.py
+nets/resnet_v1.py
+nets/vgg16.py
+nms/__init__.py
+nms/build.py
+nms/pth_nms.py
+nms/_ext/__init__.py
+nms/_ext/nms/__init__.py
+ops/nms/src/nms_cpu.cpp
+ops/nms/src/nms_cuda.cpp
+ops/nms/src/nms_kernel.cu
+ops/nms/src/soft_nms_cpu.cpp
+ops/roi_align/src/roi_align_cuda.cpp
+ops/roi_align/src/roi_align_kernel.cu
+ops/roi_crop/src/roi_crop_cpu.cpp
+ops/roi_crop/src/roi_crop_cuda.cpp
+ops/roi_crop/src/roi_crop_kernel.cu
+ops/roi_pool/src/roi_pool_cuda.cpp
+ops/roi_pool/src/roi_pool_kernel.cu
+ops/roi_ring_pool/src/roi_ring_pool_cuda.cpp
+ops/roi_ring_pool/src/roi_ring_pool_kernel.cu
+roi_data_layer/__init__.py
+roi_data_layer/layer.py
+roi_data_layer/minibatch.py
+roi_data_layer/roidb.py
+utils/__init__.py
+utils/bbox.py
+utils/blob.py
+utils/timer.py
+utils/visualization.py
\ No newline at end of file
diff --git a/lib/faster_rcnn.egg-info/dependency_links.txt b/lib/faster_rcnn.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/faster_rcnn.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/lib/faster_rcnn.egg-info/not-zip-safe b/lib/faster_rcnn.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/faster_rcnn.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/lib/faster_rcnn.egg-info/top_level.txt b/lib/faster_rcnn.egg-info/top_level.txt
new file mode 100644
index 0000000..1630b74
--- /dev/null
+++ b/lib/faster_rcnn.egg-info/top_level.txt
@@ -0,0 +1,8 @@
+datasets
+layer_utils
+model
+nets
+nms
+ops
+roi_data_layer
+utils
diff --git a/lib/make.sh b/lib/make.sh
new file mode 100644
index 0000000..1d3f18f
--- /dev/null
+++ b/lib/make.sh
@@ -0,0 +1,38 @@
+CUDA_ARCH="-gencode arch=compute_30,code=sm_30 \
+ -gencode arch=compute_35,code=sm_35 \
+ -gencode arch=compute_50,code=sm_50 \
+ -gencode arch=compute_52,code=sm_52 \
+ -gencode arch=compute_60,code=sm_60 \
+ -gencode arch=compute_61,code=sm_61 "
+
+# Build RoiPooling module
+
+cd layer_utils/roi_pooling/src
+echo "Compiling roi_pooling kernels by nvcc..."
+nvcc -c -o roi_pooling_kernel.cu.o roi_pooling_kernel.cu -x cu -Xcompiler -fPIC $CUDA_ARCH
+cd ../
+python build.py
+cd ../../
+
+# Build RoiRingpooling module
+cd layer_utils/roi_ring_pooling/src
+echo "Compiling roi_ring_pooling kernels by nvcc"
+nvcc -c -o roi_ring_pooling_kernel.cu.o roi_ring_pooling_kernel.cu -x cu -Xcompiler -fPIC $CUDA_ARCH
+cd ..
+python build.py
+
+# Build RoIAlign
+cd layer_utils/roi_align/src/cuda
+echo 'Compiling crop_and_resize kernels by nvcc...'
+nvcc -c -o crop_and_resize_kernel.cu.o crop_and_resize_kernel.cu -x cu -Xcompiler -fPIC $CUDA_ARCH
+cd ../../
+python build.py
+cd ../../
+
+# Build NMS
+cd nms/src/cuda
+echo "Compiling nms kernels by nvcc..."
+nvcc -c -o nms_kernel.cu.o nms_kernel.cu -x cu -Xcompiler -fPIC $CUDA_ARCH
+cd ../../
+python build.py
+cd ../
diff --git a/lib/make_cuda.sh b/lib/make_cuda.sh
new file mode 100644
index 0000000..23bfe54
--- /dev/null
+++ b/lib/make_cuda.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+export CUDA_PATH=/usr/local/cuda/
+# You may also want to add the following
+export C_INCLUDE_PATH=/usr/local/cuda/include
+python setup_cuda.py develop
\ No newline at end of file
diff --git a/lib/model/__init__.py b/lib/model/__init__.py
new file mode 100644
index 0000000..d63bc18
--- /dev/null
+++ b/lib/model/__init__.py
@@ -0,0 +1 @@
+from . import config
diff --git a/lib/model/__pycache__/__init__.cpython-36.pyc b/lib/model/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..cdb52eb
Binary files /dev/null and b/lib/model/__pycache__/__init__.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/__init__.cpython-37.pyc b/lib/model/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..16984bd
Binary files /dev/null and b/lib/model/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/model/__pycache__/apmetric.cpython-36.pyc b/lib/model/__pycache__/apmetric.cpython-36.pyc
new file mode 100644
index 0000000..fde150f
Binary files /dev/null and b/lib/model/__pycache__/apmetric.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/bbox_transform.cpython-36.pyc b/lib/model/__pycache__/bbox_transform.cpython-36.pyc
new file mode 100644
index 0000000..821a773
Binary files /dev/null and b/lib/model/__pycache__/bbox_transform.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/bbox_transform.cpython-37.pyc b/lib/model/__pycache__/bbox_transform.cpython-37.pyc
new file mode 100644
index 0000000..a5dbbc0
Binary files /dev/null and b/lib/model/__pycache__/bbox_transform.cpython-37.pyc differ
diff --git a/lib/model/__pycache__/config.cpython-36.pyc b/lib/model/__pycache__/config.cpython-36.pyc
new file mode 100644
index 0000000..b648a7e
Binary files /dev/null and b/lib/model/__pycache__/config.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/config.cpython-37.pyc b/lib/model/__pycache__/config.cpython-37.pyc
new file mode 100644
index 0000000..fbbb4b3
Binary files /dev/null and b/lib/model/__pycache__/config.cpython-37.pyc differ
diff --git a/lib/model/__pycache__/nms_wrapper.cpython-36.pyc b/lib/model/__pycache__/nms_wrapper.cpython-36.pyc
new file mode 100644
index 0000000..93ed370
Binary files /dev/null and b/lib/model/__pycache__/nms_wrapper.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/nms_wrapper.cpython-37.pyc b/lib/model/__pycache__/nms_wrapper.cpython-37.pyc
new file mode 100644
index 0000000..2aa3847
Binary files /dev/null and b/lib/model/__pycache__/nms_wrapper.cpython-37.pyc differ
diff --git a/lib/model/__pycache__/test.cpython-36.pyc b/lib/model/__pycache__/test.cpython-36.pyc
new file mode 100644
index 0000000..7cf5e61
Binary files /dev/null and b/lib/model/__pycache__/test.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/test.cpython-37.pyc b/lib/model/__pycache__/test.cpython-37.pyc
new file mode 100644
index 0000000..3659ca5
Binary files /dev/null and b/lib/model/__pycache__/test.cpython-37.pyc differ
diff --git a/lib/model/__pycache__/train_val.cpython-36.pyc b/lib/model/__pycache__/train_val.cpython-36.pyc
new file mode 100644
index 0000000..ba97290
Binary files /dev/null and b/lib/model/__pycache__/train_val.cpython-36.pyc differ
diff --git a/lib/model/__pycache__/train_val.cpython-37.pyc b/lib/model/__pycache__/train_val.cpython-37.pyc
new file mode 100644
index 0000000..8db87af
Binary files /dev/null and b/lib/model/__pycache__/train_val.cpython-37.pyc differ
diff --git a/lib/model/apmetric.py b/lib/model/apmetric.py
new file mode 100644
index 0000000..fb88b7e
--- /dev/null
+++ b/lib/model/apmetric.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Nov 22 16:14:59 2018
+
+@author: vasgaoweithu
+"""
+
+import torch
+import numpy as np
+from copy import deepcopy
+import math
+
+class AveragePrecisionMeter(object):
+ def __init__(self, difficult_examples=False):
+ super(AveragePrecisionMeter, self).__init__()
+ self.reset()
+ self.difficult_examples = difficult_examples
+
+ def reset(self):
+ self.scores = torch.FloatTensor(torch.FloatStorage())
+ self.targets = torch.LongTensor(torch.LongStorage())
+
+ def add(self, output, target):
+ if not torch.is_tensor(output):
+ output = torch.from_numpy(output)
+ if not torch.is_tensor(target):
+ target = torch.from_numpy(target)
+
+ if output.dim() == 1:
+ output = output.view(-1, 1)
+ else:
+ assert output.dim() == 2, \
+ 'wrong output size (should be 1D or 2D with one column \
+ per class)'
+ if target.dim() == 1:
+ target = target.view(-1, 1)
+ else:
+ assert target.dim() == 2, \
+ 'wrong target size (should be 1D or 2D with one column \
+ per class)'
+ if self.scores.numel() > 0:
+ assert target.size(1) == self.targets.size(1), \
+ 'dimensions for output should match previously added examples.'
+
+ # make sure storage is of sufficient size
+ if self.scores.storage().size() < self.scores.numel() + output.numel():
+ new_size = math.ceil(self.scores.storage().size() * 1.5)
+ self.scores.storage().resize_(int(new_size + output.numel()))
+ self.targets.storage().resize_(int(new_size + output.numel()))
+
+ # store scores and targets
+ offset = self.scores.size(0) if self.scores.dim() > 0 else 0
+ self.scores.resize_(offset + output.size(0), output.size(1))
+ self.targets.resize_(offset + target.size(0), target.size(1))
+ self.scores.narrow(0, offset, output.size(0)).copy_(output)
+ self.targets.narrow(0, offset, target.size(0)).copy_(target)
+
+ def value(self):
+ if self.scores.numel() == 0:
+ return 0
+ ap = torch.zeros(self.scores.size(1))
+ rg = torch.arange(1, self.scores.size(0)).float()
+
+ # compute average precision for each class
+ for k in range(self.scores.size(1)):
+ # sort scores
+ scores = self.scores[:, k]
+ targets = self.targets[:, k]
+
+ # compute average precision
+ ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
+ return ap
+
+ @staticmethod
+ def average_precision(output, target, difficult_examples=True):
+
+ # sort examples
+ sorted, indices = torch.sort(output, dim=0, descending=True)
+
+ # Computes prec@i
+ pos_count = 0.
+ total_count = 0.
+ precision_at_i = 0.
+ for i in indices:
+ label = target[i]
+ if difficult_examples and label == 0:
+ continue
+ if label == 1:
+ pos_count += 1
+ total_count += 1
+ if label == 1:
+ precision_at_i += pos_count / total_count
+ precision_at_i /= pos_count
+ return precision_at_i
\ No newline at end of file
diff --git a/lib/model/bbox_transform.py b/lib/model/bbox_transform.py
new file mode 100644
index 0000000..66916a8
--- /dev/null
+++ b/lib/model/bbox_transform.py
@@ -0,0 +1,80 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import torch
+
+def bbox_transform(ex_rois, gt_rois):
+ ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
+ ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
+ ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
+ ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
+
+ gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
+ gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
+ gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
+ gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
+
+ targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
+ targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
+ targets_dw = torch.log(gt_widths / ex_widths)
+ targets_dh = torch.log(gt_heights / ex_heights)
+
+ targets = torch.stack(
+ (targets_dx, targets_dy, targets_dw, targets_dh), 1)
+ return targets
+
+
+def bbox_transform_inv(boxes, deltas):
+ # Input should be both tensor or both Variable and on the same device
+ if len(boxes) == 0:
+ return deltas.detach() * 0
+
+ widths = boxes[:, 2] - boxes[:, 0] + 1.0
+ heights = boxes[:, 3] - boxes[:, 1] + 1.0
+ ctr_x = boxes[:, 0] + 0.5 * widths
+ ctr_y = boxes[:, 1] + 0.5 * heights
+
+ dx = deltas[:, 0::4]
+ dy = deltas[:, 1::4]
+ dw = deltas[:, 2::4]
+ dh = deltas[:, 3::4]
+
+ pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
+ pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
+ pred_w = torch.exp(dw) * widths.unsqueeze(1)
+ pred_h = torch.exp(dh) * heights.unsqueeze(1)
+
+ pred_boxes = torch.cat(\
+ [_.unsqueeze(2) for _ in [pred_ctr_x - 0.5 * pred_w,\
+ pred_ctr_y - 0.5 * pred_h,\
+ pred_ctr_x + 0.5 * pred_w,\
+ pred_ctr_y + 0.5 * pred_h]], 2).view(len(boxes), -1)
+
+ return pred_boxes
+
+
+def clip_boxes(boxes, im_shape):
+ """
+ Clip boxes to image boundaries.
+ boxes must be tensor or Variable, im_shape can be anything but Variable
+ """
+
+ if not hasattr(boxes, 'data'):
+ boxes_ = boxes.numpy()
+
+ boxes = boxes.view(boxes.size(0), -1, 4)
+ boxes = torch.stack(\
+ [boxes[:,:,0].clamp(0, im_shape[1] - 1),
+ boxes[:,:,1].clamp(0, im_shape[0] - 1),
+ boxes[:,:,2].clamp(0, im_shape[1] - 1),
+ boxes[:,:,3].clamp(0, im_shape[0] - 1)], 2).view(boxes.size(0), -1)
+
+ return boxes
diff --git a/lib/model/config.py b/lib/model/config.py
new file mode 100644
index 0000000..b8febb6
--- /dev/null
+++ b/lib/model/config.py
@@ -0,0 +1,394 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import os.path as osp
+import numpy as np
+# `pip install easydict` if you don't have it
+from easydict import EasyDict as edict
+
+__C = edict()
+# Consumers can get config by:
+# from fast_rcnn_config import cfg
+cfg = __C
+
+#
+# Training options
+#
+__C.TRAIN = edict()
+
+# Initial learning rate
+__C.TRAIN.LEARNING_RATE = 0.001
+
+# Momentum
+__C.TRAIN.MOMENTUM = 0.9
+
+# Weight decay, for regularization
+__C.TRAIN.WEIGHT_DECAY = 0.0001
+
+# Factor for reducing the learning rate
+__C.TRAIN.GAMMA = 0.1
+
+# Step size for reducing the learning rate, currently only support one step
+__C.TRAIN.STEPSIZE = [60000]
+
+# Iteration intervals for showing the loss during training, on command line interface
+__C.TRAIN.DISPLAY = 10
+
+# Whether to double the learning rate for bias
+__C.TRAIN.DOUBLE_BIAS = True
+
+# Whether to initialize the weights with truncated normal distribution
+__C.TRAIN.TRUNCATED = False
+
+# Whether to have weight decay on bias as well
+__C.TRAIN.BIAS_DECAY = False
+
+# Whether to add ground truth boxes to the pool when sampling regions
+__C.TRAIN.USE_GT = False
+
+# Whether to use aspect-ratio grouping of training images, introduced merely for saving
+# GPU memory
+__C.TRAIN.ASPECT_GROUPING = False
+
+# The number of snapshots kept, older ones are deleted to save space
+__C.TRAIN.SNAPSHOT_KEPT = 3
+
+# The time interval for saving tensorflow summaries
+__C.TRAIN.SUMMARY_INTERVAL = 180
+
+# Scale to use during training (can list multiple scales)
+# The scale is the pixel size of an image's shortest side
+__C.TRAIN.SCALES = (600,)
+
+# Max pixel size of the longest side of a scaled input image
+__C.TRAIN.MAX_SIZE = 1000
+
+# Images to use per minibatch
+__C.TRAIN.IMS_PER_BATCH = 1
+
+# Minibatch size (number of regions of interest [ROIs])
+__C.TRAIN.BATCH_SIZE = 128
+
+# Fraction of minibatch that is labeled foreground (i.e. class > 0)
+__C.TRAIN.FG_FRACTION = 0.25
+
+# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
+__C.TRAIN.FG_THRESH = 0.55
+
+# Overlap threshold for a ROI to be considered background (class = 0 if
+# overlap in [LO, HI))
+__C.TRAIN.BG_THRESH_HI = 0.45
+__C.TRAIN.BG_THRESH_LO = 0.1
+
+__C.TRAIN.MIL_FG_THRESH = 0.6
+__C.TRAIN.MIL_BG_THRESH_LO = 0.1
+__C.TRAIN.MIL_BG_THRESH_HI = 0.4
+__C.TRAIN.MIL_RECURRENT_STEP = 30000
+__C.TRAIN.MIL_RECURRECT_WEIGHT = 0.5
+
+__C.TRAIN.MIL_NUM_FG = 32
+__C.TRAIN.MIL_NUM_BG = 96
+__C.TRAIN.MIL_BATCHSIZE = 128
+
+
+# Use horizontally-flipped images during training?
+__C.TRAIN.USE_FLIPPED = True
+
+# Train bounding-box regressors
+__C.TRAIN.BBOX_REG = True
+
+# Overlap required between a ROI and ground-truth box in order for that ROI to
+# be used as a bounding-box regression training example
+__C.TRAIN.BBOX_THRESH = 0.5
+
+# Iterations between snapshots
+__C.TRAIN.SNAPSHOT_ITERS = 5000
+__C.TRAIN.STEP_ITERS = 50000 # voc07
+
+# solver.prototxt specifies the snapshot path prefix, this adds an optional
+# infix to yield the path: [_]_iters_XYZ.caffemodel
+__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
+
+# Normalize the targets (subtract empirical mean, divide by empirical stddev)
+__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
+
+# Deprecated (inside weights)
+__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+# Normalize the targets using "precomputed" (or made up) means and stdevs
+# (BBOX_NORMALIZE_TARGETS must also be True)
+__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
+
+__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
+
+__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
+
+# Train using these proposals
+__C.TRAIN.PROPOSAL_METHOD = 'gt'
+
+# Make minibatches from images that have similar aspect ratios (i.e. both
+# tall and thin or both short and wide) in order to avoid wasting computation
+# on zero-padding.
+
+# Use RPN to detect objects
+__C.TRAIN.HAS_RPN = True
+
+# IOU >= thresh: positive example
+__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
+
+# IOU < thresh: negative example
+__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
+
+# If an anchor satisfied by positive and negative conditions set to negative
+__C.TRAIN.RPN_CLOBBER_POSITIVES = False
+
+# Max number of foreground examples
+__C.TRAIN.RPN_FG_FRACTION = 0.5
+
+# Total number of examples
+__C.TRAIN.RPN_BATCHSIZE = 256
+
+# NMS threshold used on RPN proposals
+__C.TRAIN.RPN_NMS_THRESH = 0.7
+
+# Number of top scoring boxes to keep before apply NMS to RPN proposals
+__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
+
+# Number of top scoring boxes to keep after applying NMS to RPN proposals
+__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
+
+# Deprecated (outside weights)
+__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+# Give the positive RPN examples weight of p * 1 / {num positives}
+# and give negatives a weight of (1 - p)
+# Set to -1.0 to use uniform example weighting
+__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
+
+# Whether to use all ground truth bounding boxes for training,
+# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
+__C.TRAIN.USE_ALL_GT = True
+
+#
+# Testing options
+#
+__C.TEST = edict()
+
+# Scale to use during testing (can NOT list multiple scales)
+# The scale is the pixel size of an image's shortest side
+__C.TEST.SCALES = (600,)
+
+# Max pixel size of the longest side of a scaled input image
+__C.TEST.MAX_SIZE = 1000
+
+# Overlap threshold used for non-maximum suppression (suppress boxes with
+# IoU >= this threshold)
+__C.TEST.NMS = 0.3
+
+# Experimental: treat the (K+1) units in the cls_score layer as linear
+# predictors (trained, eg, with one-vs-rest SVMs).
+__C.TEST.SVM = False
+
+# Test using bounding-box regressors
+__C.TEST.BBOX_REG = True
+
+# Propose boxes
+__C.TEST.HAS_RPN = False
+
+# Test using these proposals
+__C.TEST.PROPOSAL_METHOD = 'gt'
+
+# NMS threshold used on RPN proposals
+__C.TEST.RPN_NMS_THRESH = 0.7
+
+# Number of top scoring boxes to keep before applying NMS to RPN proposals
+__C.TEST.RPN_PRE_NMS_TOP_N = 6000
+
+# Number of top scoring boxes to keep after applying NMS to RPN proposals
+__C.TEST.RPN_POST_NMS_TOP_N = 300
+
+# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
+# __C.TEST.RPN_MIN_SIZE = 16
+
+# Testing mode, defaults to 'nms'; 'top' is slower but better
+# See report for details
+__C.TEST.MODE = 'nms'
+
+# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
+__C.TEST.RPN_TOP_N = 5000
+
+#
+# ResNet options
+#
+
+__C.RESNET = edict()
+
+# Option to set if max-pooling is appended after crop_and_resize.
+# if true, the region will be resized to a square of 2xPOOLING_SIZE,
+# then 2x2 max-pooling is applied; otherwise the region will be directly
+# resized to a square of POOLING_SIZE
+__C.RESNET.MAX_POOL = False
+
+# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
+# Range: 0 (none) to 3 (all)
+__C.RESNET.FIXED_BLOCKS = 1
+
+#
+# MobileNet options
+#
+
+__C.MOBILENET = edict()
+
+# Whether to regularize the depth-wise filters during training
+__C.MOBILENET.REGU_DEPTH = False
+
+# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
+# Range: 0 (none) to 12 (all)
+__C.MOBILENET.FIXED_LAYERS = 5
+
+# Weight decay for the mobilenet weights
+__C.MOBILENET.WEIGHT_DECAY = 0.00004
+
+# Depth multiplier
+__C.MOBILENET.DEPTH_MULTIPLIER = 1.
+
+#
+# MISC
+#
+
+# Pixel mean values (BGR order) as a (1, 1, 3) array
+# We use the same pixel mean for all networks even though it's not exactly what
+# they were trained with
+__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
+
+# For reproducibility
+__C.RNG_SEED = 3
+
+# Root directory of project
+__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
+
+# Data directory
+__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
+
+# Name (or path to) the matlab executable
+__C.MATLAB = 'matlab'
+
+# Place outputs under an experiments directory
+__C.EXP_DIR = 'default'
+
+# Use GPU implementation of non-maximum suppression
+__C.USE_GPU_NMS = True
+
+# Default pooling mode
+__C.POOLING_MODE = 'crop'
+
+# Size of the pooled region after RoI pooling
+__C.POOLING_SIZE = 7
+
+# Anchor scales for RPN
+__C.ANCHOR_SCALES = [8,16,32]
+
+# Anchor ratios for RPN
+__C.ANCHOR_RATIOS = [0.5,1,2]
+
+# Number of filters for the RPN layer
+__C.RPN_CHANNELS = 512
+
+
+def get_output_dir(imdb, weights_filename):
+ """Return the directory where experimental artifacts are placed.
+ If the directory does not exist, it is created.
+
+ A canonical path is built using the name from an imdb and a network
+ (if not None).
+ """
+ outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
+ if weights_filename is None:
+ weights_filename = 'default'
+ outdir = osp.join(outdir, weights_filename)
+ if not os.path.exists(outdir):
+ os.makedirs(outdir)
+ return outdir
+
+
+def get_output_tb_dir(imdb, weights_filename):
+ """Return the directory where tensorflow summaries are placed.
+ If the directory does not exist, it is created.
+
+ A canonical path is built using the name from an imdb and a network
+ (if not None).
+ """
+ outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
+ if weights_filename is None:
+ weights_filename = 'default'
+ outdir = osp.join(outdir, weights_filename)
+ if not os.path.exists(outdir):
+ os.makedirs(outdir)
+ return outdir
+
+
+def _merge_a_into_b(a, b):
+ """Merge config dictionary a into config dictionary b, clobbering the
+ options in b whenever they are also specified in a.
+ """
+ if type(a) is not edict:
+ return
+
+ for k, v in a.items():
+ # a must specify keys that are in b
+ if k not in b:
+ raise KeyError('{} is not a valid config key'.format(k))
+
+ # the types must match, too
+ old_type = type(b[k])
+ if old_type is not type(v):
+ if isinstance(b[k], np.ndarray):
+ v = np.array(v, dtype=b[k].dtype)
+ else:
+ raise ValueError(('Type mismatch ({} vs. {}) '
+ 'for config key: {}').format(type(b[k]),
+ type(v), k))
+
+ # recursively merge dicts
+ if type(v) is edict:
+ try:
+ _merge_a_into_b(a[k], b[k])
+ except:
+ print(('Error under config key: {}'.format(k)))
+ raise
+ else:
+ b[k] = v
+
+
+def cfg_from_file(filename):
+ """Load a config file and merge it into the default options."""
+ import yaml
+ with open(filename, 'r') as f:
+ yaml_cfg = edict(yaml.load(f))
+
+ _merge_a_into_b(yaml_cfg, __C)
+
+
+def cfg_from_list(cfg_list):
+ """Set config keys via list (e.g., from command line)."""
+ from ast import literal_eval
+ assert len(cfg_list) % 2 == 0
+ for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
+ key_list = k.split('.')
+ d = __C
+ for subkey in key_list[:-1]:
+ assert subkey in d
+ d = d[subkey]
+ subkey = key_list[-1]
+ assert subkey in d
+ try:
+ value = literal_eval(v)
+ except:
+ # handle the case when v is a string literal
+ value = v
+ assert type(value) == type(d[subkey]), \
+ 'type {} does not match original type {}'.format(
+ type(value), type(d[subkey]))
+ d[subkey] = value
diff --git a/lib/model/nms_wrapper.py b/lib/model/nms_wrapper.py
new file mode 100644
index 0000000..3e45e6a
--- /dev/null
+++ b/lib/model/nms_wrapper.py
@@ -0,0 +1,17 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from nms.pth_nms import pth_nms
+
+
+def nms(dets, thresh):
+ """Dispatch to either CPU or GPU NMS implementations.
+ Accept dets as tensor"""
+ return pth_nms(dets, thresh)
diff --git a/lib/model/test.py b/lib/model/test.py
new file mode 100644
index 0000000..600085d
--- /dev/null
+++ b/lib/model/test.py
@@ -0,0 +1,295 @@
+# --------------------------------------------------------
+# Tensorflow Faster R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import cv2
+import numpy as np
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+import os
+import math
+
+from utils.timer import Timer
+from ops.nms import nms
+from utils.blob import im_list_to_blob
+
+from model.config import cfg, get_output_dir
+from model.bbox_transform import clip_boxes, bbox_transform_inv
+
+import torch
+
+from model.apmetric import AveragePrecisionMeter
+
+
+def _get_image_blob(im):
+ """Converts an image into a network input.
+ Arguments:
+ im (ndarray): a color image in BGR order
+ Returns:
+ blob (ndarray): a data blob holding an image pyramid
+ im_scale_factors (list): list of image scales (relative to im) used
+ in the image pyramid
+ """
+ im_orig = im.astype(np.float32, copy=True)
+ im_orig -= cfg.PIXEL_MEANS
+
+ im_shape = im_orig.shape
+ im_size_min = np.min(im_shape[0:2])
+ im_size_max = np.max(im_shape[0:2])
+
+ processed_ims = []
+ im_scale_factors = []
+ im_shape = []
+
+ for target_size in cfg.TEST.SCALES:
+ im_scale = float(target_size) / float(im_size_min)
+ # Prevent the biggest axis from being more than MAX_SIZE
+ if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
+ im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
+ im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
+ interpolation=cv2.INTER_LINEAR)
+ im_scale_factors.append(im_scale)
+ processed_ims.append(im)
+ im_shape.append(im.shape)
+
+ # Create a blob to hold the input images
+ blob = im_list_to_blob(processed_ims)
+
+ return blob, np.array(im_scale_factors), im_shape
+
+
+def _get_blobs(im):
+ """Convert an image and RoIs within that image into network inputs."""
+ blobs = {}
+ blobs['data'], im_scale_factors, im_shape = _get_image_blob(im)
+
+ return blobs, im_scale_factors, im_shape
+
+
+def _clip_boxes(boxes, im_shape):
+ """Clip boxes to image boundaries."""
+ # x1 >= 0
+ boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
+ # y1 >= 0
+ boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
+ # x2 < im_shape[1]
+ boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
+ # y2 < im_shape[0]
+ boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
+ return boxes
+
+def _shuffle_boxes(boxes, w, h):
+ num_ss = boxes.shape[0]
+ jet_scale = np.random.uniform(0.95, 1.15, num_ss)
+ jet_center_w = np.random.uniform(-0.05, 0.05, num_ss)
+ jet_center_h = np.random.uniform(-0.05, 0.05, num_ss)
+ ss_box_jet = boxes[:, 1:].copy()
+ widths = ss_box_jet[:, 2] - ss_box_jet[:, 0] + 1.0
+ heights = ss_box_jet[:, 3] - ss_box_jet[:, 1] + 1.0
+ ctr_x = ss_box_jet[:, 0] + 0.5 * widths + jet_center_w * widths
+ ctr_y = ss_box_jet[:, 1] + 0.5 * heights + jet_center_h * heights
+ widths_new = widths * jet_scale
+ heights_new = heights * jet_scale
+ boxes[:, 1] = ctr_x - widths_new * 0.5
+ boxes[:, 2] = ctr_y - heights_new * 0.5
+ boxes[:, 3] = ctr_x + widths_new * 0.5
+ boxes[:, 4] = ctr_y + heights_new * 0.5
+ boxes[:, 1] = np.clip(boxes[:, 1], 0, w - 1)
+ boxes[:, 2] = np.clip(boxes[:, 2], 0, h - 1)
+ boxes[:, 3] = np.clip(boxes[:, 3], 0, w - 1)
+ boxes[:, 4] = np.clip(boxes[:, 4], 0, h - 1)
+ return boxes
+
+def _rescale_boxes(boxes, inds, scales):
+ """Rescale boxes according to image rescaling."""
+ for i in range(boxes.shape[0]):
+ boxes[i, :] = boxes[i, :] / scales[int(inds[i])]
+
+ return boxes
+
+def get_ss_boxes(roidb_i, im_scales):
+ ss_inds = np.where(roidb_i['gt_classes'] == -1)[0] # remove gt_rois in ss_boxes
+ ss_boxes = np.empty((len(ss_inds), 5), dtype=np.float32)
+ ss_boxes[:, 1:] = roidb_i['boxes'][ss_inds, :] * im_scales
+ ss_boxes[:, 0] = 0
+ return ss_boxes
+
+
+def get_flipper_boxes(ss_box, width):
+ oldx1 = ss_box[:, 1].copy()
+ oldx2 = ss_box[:, 3].copy()
+ ss_box[:, 1] = width - oldx2 - 1
+ ss_box[:, 3] = width - oldx1 - 1
+ return ss_box
+
+
+def im_detect(net, im, roidb_i):
+ blobs, im_scales, im_shape = _get_blobs(im)
+ # ss_boxes = get_ss_boxes(roidb_i, im_scales)
+ # assert len(im_scales) == 1, "Only single-image batch implemented"
+ # assert len(im_scales) == 5, "multiple scale test"
+
+ im_blob_scales = blobs['data']
+
+ score_list = []
+ pred_box_list = []
+ det_cls_list = []
+
+ for index, scale in enumerate(im_scales):
+ im_blob = im_blob_scales[index]
+ ss_boxes = get_ss_boxes(roidb_i, scale)
+ h, w = im_shape[index][0], im_shape[index][1]
+ ss_boxes_2 = ss_boxes.copy()
+ ss_boxes = _shuffle_boxes(ss_boxes, w, h)
+ ss_boxes_2 = _shuffle_boxes(ss_boxes_2, w, h)
+
+ im_blob_flip = im_blob.copy()[:, ::-1, :]
+ ss_boxes_flip = get_flipper_boxes(ss_boxes_2.copy(), im_blob.shape[1])
+ for k in range(2): # input and its flip
+ if k % 2 == 1:
+ im_blob_flip = im_blob_flip[np.newaxis, :, :, :]
+ img_info = np.array([im_blob.shape[1], im_blob.shape[2], scale], dtype=np.float32)
+ bbox_pred, rois, det_cls_prob, det_cls_prob_product, refine_prob_1, refine_prob_2 = net.test_image(im_blob_flip, img_info, ss_boxes_flip)
+ boxes = ss_boxes_2[:, 1:5] / scale
+ else:
+ im_blob = im_blob[np.newaxis, :, :, :]
+ img_info = np.array([im_blob.shape[1], im_blob.shape[2], scale], dtype=np.float32)
+ bbox_pred, rois, det_cls_prob, det_cls_prob_product, refine_prob_1, refine_prob_2 = net.test_image(im_blob, img_info, ss_boxes)
+ boxes = ss_boxes[:, 1:5] / scale
+
+ # scores = np.reshape((refine_prob_1 + refine_prob_2) / 2, [det_cls_prob_product.shape[0], -1])
+ scores = np.reshape(refine_prob_1, [det_cls_prob_product.shape[0], -1])
+ bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
+
+ if cfg.TEST.BBOX_REG:
+ box_deltas = bbox_pred
+ pred_boxes = bbox_transform_inv(torch.from_numpy(boxes), torch.from_numpy(box_deltas)).numpy()
+ pred_boxes = _clip_boxes(pred_boxes, im.shape)
+ else:
+ pred_boxes = np.tile(boxes, (1, scores.shape[1]))
+ score_list.append(scores)
+ pred_box_list.append(pred_boxes)
+ det_cls_list.append(det_cls_prob)
+
+ scores = np.array(score_list).mean(axis=0)
+ pred_boxes = np.array(pred_box_list).mean(axis=0)
+ det_cls_prob = np.array(det_cls_list).mean(axis=0)
+ target = np.reshape(roidb_i['image_level_labels'], (-1))
+ return scores, pred_boxes, det_cls_prob, target
+
+
+def apply_nms(all_boxes, thresh):
+ """Apply non-maximum suppression to all predicted boxes output by the
+ test_net method.
+ """
+ num_classes = len(all_boxes)
+ num_images = len(all_boxes[0])
+ nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
+ for cls_ind in range(num_classes):
+ for im_ind in range(num_images):
+ dets = all_boxes[cls_ind][im_ind]
+ if dets == []:
+ continue
+
+ x1 = dets[:, 0]
+ y1 = dets[:, 1]
+ x2 = dets[:, 2]
+ y2 = dets[:, 3]
+ scores = dets[:, 4]
+ inds = np.where((x2 > x1) & (y2 > y1))[0]
+ dets = dets[inds, :]
+ if dets == []:
+ continue
+
+ keep = nms(torch.from_numpy(dets), thresh).numpy()
+ if len(keep) == 0:
+ continue
+ nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
+ return nms_boxes
+
+
+def test_net(net, imdb, roidb, weights_filename, max_per_image=100, thresh=0.):
+ np.random.seed(cfg.RNG_SEED)
+ """Test a Fast R-CNN network on an image database."""
+ num_images = len(imdb.image_index)
+ # all detections are collected into:
+ # all_boxes[cls][image] = N x 5 array of detections in
+ # (x1, y1, x2, y2, score)
+ all_boxes = [[[] for _ in range(num_images)]
+ for _ in range(imdb.num_classes)]
+
+ output_dir = get_output_dir(imdb, weights_filename)
+
+ # -------------------------------------------------------
+ ap_meter = AveragePrecisionMeter(difficult_examples=True)
+ ap_meter.reset()
+ # timers
+ _t = {'im_detect': Timer(), 'misc': Timer()}
+
+ for i in range(num_images):
+ im = cv2.imread(imdb.image_path_at(i))
+
+ _t['im_detect'].tic()
+ scores, boxes, det_cls_prob, target = im_detect(net, im, roidb[i])
+ _t['im_detect'].toc()
+
+ _t['misc'].tic()
+
+ output = np.reshape(det_cls_prob[:], (1, -1))
+ target = np.reshape(target[:], (1, -1))
+ ap_meter.add(output, target)
+
+        # NOTE: range starts at 0, so the background class (j = 0) is NOT skipped here
+ for j in range(0, imdb.num_classes):
+ inds = np.where(scores[:, j] > thresh)[0]
+ cls_scores = scores[inds, j]
+ cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
+ cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
+ .astype(np.float32, copy=False)
+ # keep = nms(torch.from_numpy(cls_dets), cfg.TEST.NMS).numpy() if cls_dets.size > 0 else []
+ # cls_dets = cls_dets[keep, :]
+ # all_boxes[j][i] = cls_dets
+ keep = nms(torch.from_numpy(cls_dets), cfg.TEST.NMS) if cls_dets.size > 0 else []
+ all_boxes[j][i] = keep[0].numpy()
+
+ # Limit to max_per_image detections *over all classes*
+ if max_per_image > 0:
+ image_scores = np.hstack([all_boxes[j][i][:, -1]
+ for j in range(1, imdb.num_classes)])
+ if len(image_scores) > max_per_image:
+ image_thresh = np.sort(image_scores)[-max_per_image]
+ for j in range(1, imdb.num_classes):
+ keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
+ all_boxes[j][i] = all_boxes[j][i][keep, :]
+ _t['misc'].toc()
+
+ if i % 100 == 0:
+ print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'.format(i + 1, num_images, _t['im_detect'].average_time(),
+ _t['misc'].average_time()))
+
+ ap = ap_meter.value().numpy()
+ print('the classification AP is ')
+ for index, cls in enumerate(imdb._classes):
+ if cls == '__background__':
+ continue
+ print(('AP for {} = {:.4f}'.format(cls, ap[index])))
+ print('__________________')
+ map = 100 * ap.mean()
+ print('the mAP is {:.4f}'.format(map))
+
+ det_file = os.path.join(output_dir, 'detections.pkl')
+ with open(det_file, 'wb') as f:
+ pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
+
+ print('Evaluating detections')
+ imdb.evaluate_detections(all_boxes, output_dir)
+
+
diff --git a/lib/model/train_val.py b/lib/model/train_val.py
new file mode 100644
index 0000000..e547fda
--- /dev/null
+++ b/lib/model/train_val.py
@@ -0,0 +1,378 @@
+# --------------------------------------------------------
+# Tensorflow Faster R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Xinlei Chen and Zheqi He
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorboardX as tb
+
+from model.config import cfg
+import roi_data_layer.roidb as rdl_roidb
+from roi_data_layer.layer import RoIDataLayer
+import utils.timer
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import torch
+import torch.optim as optim
+import numpy as np
+import os
+import glob
+import time
+import torch.nn.functional as F
+from torch import nn
+
+def update_learning_rate(optimizer, cur_lr, new_lr):
+ """Update learning rate"""
+ if cur_lr != new_lr:
+ ratio = _get_lr_change_ratio(cur_lr, new_lr)
+ param_keys = []
+ for ind, param_group in enumerate(optimizer.param_groups):
+ param_group['lr'] = new_lr
+ param_keys += param_group['params']
+
+def _get_lr_change_ratio(cur_lr, new_lr):
+ eps = 1e-10
+ ratio = np.max(
+ (new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps)))
+ )
+ return ratio
+
+def scale_lr(optimizer, scale):
+ """Scale the learning rate of the optimizer"""
+ for param_group in optimizer.param_groups:
+ param_group['lr'] *= scale
+
+class SolverWrapper(object):
+ """
+ A wrapper class for the training process
+ """
+
+ def __init__(self, network, imdb, roidb, valroidb, output_dir, tbdir, pretrained_model=None):
+ self.net = network
+ self.imdb = imdb
+ self.roidb = roidb
+ self.valroidb = valroidb
+ self.output_dir = output_dir
+ self.tbdir = tbdir
+ # Simply put '_val' at the end to save the summaries from the validation set
+ self.tbvaldir = tbdir + '_val'
+ if not os.path.exists(self.tbvaldir):
+ os.makedirs(self.tbvaldir)
+ self.pretrained_model = pretrained_model
+
+ def snapshot(self, iter):
+
+ if not os.path.exists(self.output_dir):
+ os.makedirs(self.output_dir)
+
+ # Store the model snapshot
+ filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pth'
+ filename = os.path.join(self.output_dir, filename)
+ torch.save(self.net.state_dict(), filename)
+ print('Wrote snapshot to: {:s}'.format(filename))
+
+ # Also store some meta information, random state, etc.
+ nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pkl'
+ nfilename = os.path.join(self.output_dir, nfilename)
+ # current state of numpy random
+ st0 = np.random.get_state()
+ # current position in the database
+ cur = self.data_layer._cur
+ # current shuffled indexes of the database
+ perm = self.data_layer._perm
+ # current position in the validation database
+ cur_val = self.data_layer_val._cur
+ # current shuffled indexes of the validation database
+ perm_val = self.data_layer_val._perm
+
+ # Dump the meta info
+ with open(nfilename, 'wb') as fid:
+ pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
+ pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
+ pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
+ pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
+ pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
+ pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
+
+ return filename, nfilename
+
+ def from_snapshot(self, sfile, nfile):
+ print('Restoring model snapshots from {:s}'.format(sfile))
+ self.net.load_state_dict(torch.load(str(sfile)))
+ print('Restored.')
+ # Needs to restore the other hyper-parameters/states for training, (TODO xinlei) I have
+ # tried my best to find the random states so that it can be recovered exactly
+ # However the Tensorflow state is currently not available
+ with open(nfile, 'rb') as fid:
+ st0 = pickle.load(fid)
+ cur = pickle.load(fid)
+ perm = pickle.load(fid)
+ cur_val = pickle.load(fid)
+ perm_val = pickle.load(fid)
+ last_snapshot_iter = pickle.load(fid)
+
+ np.random.set_state(st0)
+ self.data_layer._cur = cur
+ self.data_layer._perm = perm
+ self.data_layer_val._cur = cur_val
+ self.data_layer_val._perm = perm_val
+
+ return last_snapshot_iter
+
+ def construct_graph(self):
+ # Set the random seed
+ torch.manual_seed(cfg.RNG_SEED)
+ # Build the main computation graph
+ self.net.create_architecture(self.imdb.num_classes, tag='default',
+ anchor_scales=cfg.ANCHOR_SCALES,
+ anchor_ratios=cfg.ANCHOR_RATIOS)
+ # Define the loss
+ # loss = layers['total_loss']
+ # Set learning rate and momentum
+ lr = cfg.TRAIN.LEARNING_RATE
+ params = []
+ for key, value in dict(self.net.named_parameters()).items():
+ if value.requires_grad:
+ if 'refine' in key and 'bias' in key:
+ params += [{'params':[value],'lr':10*lr*(cfg.TRAIN.DOUBLE_BIAS + 1), 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
+ elif 'refine' in key and 'bias' not in key:
+ params += [{'params':[value],'lr':10*lr, 'weight_decay': getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)}]
+ elif 'refine' not in key and 'bias' in key:
+ params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
+ else:
+ params += [{'params':[value],'lr':lr, 'weight_decay': getattr(value, 'weight_decay', cfg.TRAIN.WEIGHT_DECAY)}]
+ self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
+ # Write the train and validation information to tensorboard
+ self.writer = tb.writer.FileWriter(self.tbdir)
+ self.valwriter = tb.writer.FileWriter(self.tbvaldir)
+
+ return lr, self.optimizer
+
+ def find_previous(self):
+ sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pth')
+ sfiles = glob.glob(sfiles)
+ sfiles.sort(key=os.path.getmtime)
+ # Get the snapshot name in pytorch
+ redfiles = []
+ for stepsize in cfg.TRAIN.STEPSIZE:
+ redfiles.append(os.path.join(self.output_dir,
+ cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.pth'.format(stepsize+1)))
+ #sfiles = [ss for ss in sfiles if ss not in redfiles]
+
+ nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
+ nfiles = glob.glob(nfiles)
+ nfiles.sort(key=os.path.getmtime)
+ redfiles = [redfile.replace('.pth', '.pkl') for redfile in redfiles]
+ #nfiles = [nn for nn in nfiles if nn not in redfiles]
+
+ lsf = len(sfiles)
+ assert len(nfiles) == lsf
+
+ return lsf, nfiles, sfiles
+
+ def initialize(self):
+ # Initial file lists are empty
+ np_paths = []
+ ss_paths = []
+ # Fresh train directly from ImageNet weights
+ print('Loading initial model weights from {:s}'.format(self.pretrained_model))
+ self.net.load_pretrained_cnn(torch.load(self.pretrained_model))
+ print('Loaded.')
+ # Need to fix the variables before loading, so that the RGB weights are changed to BGR
+ # For VGG16 it also changes the convolutional weights fc6 and fc7 to
+ # fully connected weights
+ last_snapshot_iter = 0
+ lr = cfg.TRAIN.LEARNING_RATE
+ stepsizes = list(cfg.TRAIN.STEPSIZE)
+
+ return lr, last_snapshot_iter, stepsizes, np_paths, ss_paths
+
+ def restore(self, sfile, nfile):
+ # Get the most recent snapshot and restore
+ np_paths = [nfile]
+ ss_paths = [sfile]
+ # Restore model from snapshots
+ last_snapshot_iter = self.from_snapshot(sfile, nfile)
+ # Set the learning rate
+ lr_scale = 1
+ stepsizes = []
+ for stepsize in cfg.TRAIN.STEPSIZE:
+ if last_snapshot_iter > stepsize:
+ lr_scale *= cfg.TRAIN.GAMMA
+ else:
+ stepsizes.append(stepsize)
+ scale_lr(self.optimizer, lr_scale)
+ lr = cfg.TRAIN.LEARNING_RATE * lr_scale
+ return lr, last_snapshot_iter, stepsizes, np_paths, ss_paths
+
+ def remove_snapshot(self, np_paths, ss_paths):
+ to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
+ for c in range(to_remove):
+ nfile = np_paths[0]
+ os.remove(str(nfile))
+ np_paths.remove(nfile)
+
+ to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
+ for c in range(to_remove):
+ sfile = ss_paths[0]
+            # To make the code compatible with earlier versions of Tensorflow,
+            # where the naming convention for checkpoints is different
+ os.remove(str(sfile))
+ ss_paths.remove(sfile)
+
+ def train_model(self, max_iters):
+ # Build data layers for both training and validation set
+ self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
+ self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
+
+ # Construct the computation graph
+ lr, train_op = self.construct_graph()
+
+ # Find previous snapshots if there is any to restore from
+ lsf, nfiles, sfiles = self.find_previous()
+
+ # Initialize the variables or restore them from the last snapshot
+ if lsf == 0:
+ lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize()
+ else:
+ lr, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(str(sfiles[-1]), str(nfiles[-1]))
+ iter = last_snapshot_iter + 1
+ last_summary_time = time.time()
+ # Make sure the lists are not empty
+ stepsizes.append(max_iters)
+ stepsizes.reverse()
+ next_stepsize = stepsizes.pop()
+
+ self.net.train()
+ self.net.to(self.net._device)
+
+ while iter < max_iters + 1:
+ # Learning rate
+ if iter == next_stepsize + 1:
+ # Add snapshot here before reducing the learning rate
+ self.snapshot(iter)
+ lr *= cfg.TRAIN.GAMMA
+ scale_lr(self.optimizer, cfg.TRAIN.GAMMA)
+ next_stepsize = stepsizes.pop()
+ #if ((iter -1) % cfg.TRAIN.MIL_RECURRENT_STEP) == 0:
+ # num_epoch = int((iter - 1) / cfg.TRAIN.MIL_RECURRENT_STEP) + 1
+ # cfg.TRAIN.MIL_RECURRECT_WEIGHT = ((num_epoch - 1)/20.0)/1.5
+ #if iter == cfg.TRAIN.MIL_RECURRENT_STEP + 1:
+ # cfg.TRAIN.MIL_RECURRECT_WEIGHT = cfg.TRAIN.MIL_RECURRECT_WEIGHT * 10
+
+ utils.timer.timer.tic()
+ # Get training data, one batch at a time
+ blobs = self.data_layer.forward(iter)
+
+ now = time.time()
+ if iter == 1 or now - last_summary_time > cfg.TRAIN.SUMMARY_INTERVAL:
+ # Compute the graph with summary
+ cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, total_loss, summary = \
+ self.net.train_step_with_summary(blobs, self.optimizer, iter)
+ # cls_det_loss, refine_loss_1, refine_loss_2, total_loss, summary = \
+ # self.net.train_step_with_summary(blobs, self.optimizer, iter)
+ #for _sum in summary: self.writer.add_summary(_sum, float(iter))
+ # Also check the summary on the validation set
+ #blobs_val = self.data_layer_val.forward()
+ #summary_val = self.net.get_summary(blobs_val, iter, drop_block)
+ #summary_val = self.net.get_summary(blobs_val, iter)
+
+ #for _sum in summary_val: self.valwriter.add_summary(_sum, float(iter))
+ last_summary_time = now
+ else:
+ # Compute the graph without summary
+ #cls_det_loss, refine_loss_1, refine_loss_2, total_loss = self.net.train_step(blobs, self.optimizer, iter)
+ cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, total_loss = self.net.train_step(blobs,self.optimizer,iter)
+ utils.timer.timer.toc()
+
+ # Display training information
+ if iter % (cfg.TRAIN.DISPLAY) == 0:
+ # print('iter: %d / %d, total loss: %.6f\n >>> cls_det_loss: %.6f\n '
+ # '>>> refine_loss_1: %.6f\n >>> refine_loss_2: %.6f\n >>> lr: %f' % \
+ # (iter, max_iters, total_loss, cls_det_loss, refine_loss_1, refine_loss_2, lr))
+ print('iter: %d / %d, total loss: %.6f\n >>> cls_det_loss: %.6f\n '
+ '>>> refine_loss_1: %.6f\n >>> refine_loss_2: %.6f\n >>> consistency_loss: %.6f\n >>> lr: %f' % \
+ (iter, max_iters, total_loss, cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, lr))
+ print('speed: {:.3f}s / iter'.format(utils.timer.timer.average_time()))
+
+ # for k in utils.timer.timer._average_time.keys():
+ # print(k, utils.timer.timer.average_time(k))
+
+ # Snapshotting
+ if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
+ last_snapshot_iter = iter
+ ss_path, np_path = self.snapshot(iter)
+ np_paths.append(np_path)
+ ss_paths.append(ss_path)
+
+ # Remove the old snapshots if there are too many
+ if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
+ self.remove_snapshot(np_paths, ss_paths)
+
+ iter += 1
+
+ if last_snapshot_iter != iter - 1:
+ self.snapshot(iter - 1)
+
+ self.writer.close()
+ self.valwriter.close()
+
+
+def get_training_roidb(imdb):
+ """Returns a roidb (Region of Interest database) for use in training."""
+ if cfg.TRAIN.USE_FLIPPED:
+ print('Appending horizontally-flipped training examples...')
+ imdb.append_flipped_images()
+ print('done')
+
+ print('Preparing training data...')
+ rdl_roidb.prepare_roidb(imdb)
+ print('done')
+
+ return imdb.roidb
+
+
+def filter_roidb(roidb):
+ """Remove roidb entries that have no usable RoIs."""
+
+ def is_valid(entry):
+ # Valid images have:
+ # (1) At least one foreground RoI OR
+ # (2) At least one background RoI
+ overlaps = entry['max_overlaps']
+ # find boxes with sufficient overlap
+ fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
+ # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
+ bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
+ (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
+ # image is only valid if such boxes exist
+ valid = len(fg_inds) > 0 or len(bg_inds) > 0
+ return valid
+
+ num = len(roidb)
+ filtered_roidb = [entry for entry in roidb if is_valid(entry)]
+ num_after = len(filtered_roidb)
+ print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
+ num, num_after))
+ return filtered_roidb
+
+
+def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
+ pretrained_model=None,
+ max_iters=40000):
+ """Train a Faster R-CNN network."""
+ roidb = filter_roidb(roidb)
+ valroidb = filter_roidb(valroidb)
+
+ sw = SolverWrapper(network, imdb, roidb, valroidb, output_dir, tb_dir,
+ pretrained_model=pretrained_model)
+
+ print('Solving...')
+ sw.train_model(max_iters)
+ print('done solving')
diff --git a/lib/nets/__init__.py b/lib/nets/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/lib/nets/__pycache__/__init__.cpython-36.pyc b/lib/nets/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..4ef374d
Binary files /dev/null and b/lib/nets/__pycache__/__init__.cpython-36.pyc differ
diff --git a/lib/nets/__pycache__/__init__.cpython-37.pyc b/lib/nets/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..dcc2a9c
Binary files /dev/null and b/lib/nets/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/nets/__pycache__/mobilenet_v1.cpython-36.pyc b/lib/nets/__pycache__/mobilenet_v1.cpython-36.pyc
new file mode 100644
index 0000000..cb69272
Binary files /dev/null and b/lib/nets/__pycache__/mobilenet_v1.cpython-36.pyc differ
diff --git a/lib/nets/__pycache__/mobilenet_v1.cpython-37.pyc b/lib/nets/__pycache__/mobilenet_v1.cpython-37.pyc
new file mode 100644
index 0000000..ff4340b
Binary files /dev/null and b/lib/nets/__pycache__/mobilenet_v1.cpython-37.pyc differ
diff --git a/lib/nets/__pycache__/network.cpython-36.pyc b/lib/nets/__pycache__/network.cpython-36.pyc
new file mode 100644
index 0000000..f70d3be
Binary files /dev/null and b/lib/nets/__pycache__/network.cpython-36.pyc differ
diff --git a/lib/nets/__pycache__/network.cpython-37.pyc b/lib/nets/__pycache__/network.cpython-37.pyc
new file mode 100644
index 0000000..359bea7
Binary files /dev/null and b/lib/nets/__pycache__/network.cpython-37.pyc differ
diff --git a/lib/nets/__pycache__/resnet_v1.cpython-36.pyc b/lib/nets/__pycache__/resnet_v1.cpython-36.pyc
new file mode 100644
index 0000000..8be4e6f
Binary files /dev/null and b/lib/nets/__pycache__/resnet_v1.cpython-36.pyc differ
diff --git a/lib/nets/__pycache__/resnet_v1.cpython-37.pyc b/lib/nets/__pycache__/resnet_v1.cpython-37.pyc
new file mode 100644
index 0000000..d54ab65
Binary files /dev/null and b/lib/nets/__pycache__/resnet_v1.cpython-37.pyc differ
diff --git a/lib/nets/__pycache__/vgg16.cpython-36.pyc b/lib/nets/__pycache__/vgg16.cpython-36.pyc
new file mode 100644
index 0000000..bd553d8
Binary files /dev/null and b/lib/nets/__pycache__/vgg16.cpython-36.pyc differ
diff --git a/lib/nets/__pycache__/vgg16.cpython-37.pyc b/lib/nets/__pycache__/vgg16.cpython-37.pyc
new file mode 100644
index 0000000..2bae43d
Binary files /dev/null and b/lib/nets/__pycache__/vgg16.cpython-37.pyc differ
diff --git a/lib/nets/mobilenet_v1.py b/lib/nets/mobilenet_v1.py
new file mode 100644
index 0000000..d68b408
--- /dev/null
+++ b/lib/nets/mobilenet_v1.py
@@ -0,0 +1,262 @@
+# --------------------------------------------------------
+# Tensorflow Faster R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Variable
+
+import numpy as np
+from collections import namedtuple, OrderedDict
+
+from nets.network import Network
+from model.config import cfg
+
+# The following is adapted from:
+# https://github.com/tensorflow/models/blob/master/slim/nets/mobilenet_v1.py
+
+# Conv and DepthSepConv named tuple define layers of the MobileNet architecture
+# Conv defines 3x3 convolution layers
+# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
+# stride is the stride of the convolution
+# depth is the number of channels or filters in a layer
+Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
+DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
+
+# _CONV_DEFS specifies the MobileNet body
+_CONV_DEFS = [
+ Conv(kernel=3, stride=2, depth=32),
+ DepthSepConv(kernel=3, stride=1, depth=64),
+ DepthSepConv(kernel=3, stride=2, depth=128),
+ DepthSepConv(kernel=3, stride=1, depth=128),
+ DepthSepConv(kernel=3, stride=2, depth=256),
+ DepthSepConv(kernel=3, stride=1, depth=256),
+ DepthSepConv(kernel=3, stride=2, depth=512),
+ DepthSepConv(kernel=3, stride=1, depth=512),
+ DepthSepConv(kernel=3, stride=1, depth=512),
+ DepthSepConv(kernel=3, stride=1, depth=512),
+ DepthSepConv(kernel=3, stride=1, depth=512),
+ DepthSepConv(kernel=3, stride=1, depth=512),
+ # use stride 1 for the 13th layer
+ DepthSepConv(kernel=3, stride=1, depth=1024),
+ DepthSepConv(kernel=3, stride=1, depth=1024)
+]
+
+def mobilenet_v1_base(final_endpoint='Conv2d_13_pointwise',
+ min_depth=8,
+ depth_multiplier=1.0,
+ conv_defs=None,
+ output_stride=None):
+ """Mobilenet v1.
+
+ Constructs a Mobilenet v1 network from inputs to the given final endpoint.
+
+ Args:
+ inputs: a tensor of shape [batch_size, height, width, channels].
+ final_endpoint: specifies the endpoint to construct the network up to. It
+ can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
+ 'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5_pointwise',
+ 'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
+ 'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
+ 'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
+ min_depth: Minimum depth value (number of channels) for all convolution ops.
+ Enforced when depth_multiplier < 1, and not an active constraint when
+ depth_multiplier >= 1.
+ depth_multiplier: Float multiplier for the depth (number of channels)
+ for all convolution ops. The value must be greater than zero. Typical
+ usage will be to set this value in (0, 1) to reduce the number of
+ parameters or computation cost of the model.
+ conv_defs: A list of ConvDef namedtuples specifying the net architecture.
+ output_stride: An integer that specifies the requested ratio of input to
+ output spatial resolution. If not None, then we invoke atrous convolution
+ if necessary to prevent the network from reducing the spatial resolution
+ of the activation maps. Allowed values are 8 (accurate fully convolutional
+ mode), 16 (fast fully convolutional mode), 32 (classification mode).
+ scope: Optional variable_scope.
+
+ Returns:
+ tensor_out: output tensor corresponding to the final_endpoint.
+ end_points: a set of activations for external use, for example summaries or
+ losses.
+
+ Raises:
+ ValueError: if final_endpoint is not set to one of the predefined values,
+ or depth_multiplier <= 0, or the target output_stride is not
+ allowed.
+ """
+ depth = lambda d: max(int(d * depth_multiplier), min_depth)
+ end_points = OrderedDict()
+
+ # Used to find thinned depths for each layer.
+ if depth_multiplier <= 0:
+ raise ValueError('depth_multiplier is not greater than zero.')
+
+ if conv_defs is None:
+ conv_defs = _CONV_DEFS
+
+ if output_stride is not None and output_stride not in [8, 16, 32]:
+ raise ValueError('Only allowed output_stride values are 8, 16, 32.')
+
+ def conv_bn(in_channels, out_channels, kernel_size=3, stride=1):
+ return nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, kernel_size, stride, (kernel_size - 1) // 2, bias=False),
+ nn.BatchNorm2d(out_channels),
+ nn.ReLU6(inplace=True)
+ )
+
+ def conv_dw(in_channels, kernel_size=3, stride=1, dilation=1):
+ return nn.Sequential(
+ nn.Conv2d(in_channels, in_channels, kernel_size, stride, (kernel_size - 1) // 2,\
+ groups=in_channels, dilation=dilation, bias=False),
+ nn.BatchNorm2d(in_channels),
+ nn.ReLU6(inplace=True)
+ )
+
+ def conv_pw(in_channels, out_channels, kernel_size=3, stride=1, dilation=1):
+ return nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, kernel_size, stride, 0, bias=False),
+ nn.BatchNorm2d(out_channels),
+ nn.ReLU6(inplace=True),
+ )
+
+ # The current_stride variable keeps track of the output stride of the
+ # activations, i.e., the running product of convolution strides up to the
+ # current network layer. This allows us to invoke atrous convolution
+ # whenever applying the next convolution would result in the activations
+ # having output stride larger than the target output_stride.
+ current_stride = 1
+
+ # The atrous convolution rate parameter.
+ rate = 1
+
+ in_channels = 3
+ for i, conv_def in enumerate(conv_defs):
+ end_point_base = 'Conv2d_%d' % i
+
+ if output_stride is not None and current_stride == output_stride:
+ # If we have reached the target output_stride, then we need to employ
+ # atrous convolution with stride=1 and multiply the atrous rate by the
+ # current unit's stride for use in subsequent layers.
+ layer_stride = 1
+ layer_rate = rate
+ rate *= conv_def.stride
+ else:
+ layer_stride = conv_def.stride
+ layer_rate = 1
+ current_stride *= conv_def.stride
+
+ out_channels = depth(conv_def.depth)
+ if isinstance(conv_def, Conv):
+ end_point = end_point_base
+ end_points[end_point] = conv_bn(in_channels, out_channels, conv_def.kernel,
+ stride=conv_def.stride)
+ if end_point == final_endpoint:
+ return nn.Sequential(end_points)
+
+ elif isinstance(conv_def, DepthSepConv):
+ end_points[end_point_base] = nn.Sequential(OrderedDict([
+ ('depthwise', conv_dw(in_channels, conv_def.kernel, stride=layer_stride, dilation=layer_rate)),
+ ('pointwise', conv_pw(in_channels, out_channels, 1, stride=1))]))
+
+ if end_point_base + '_pointwise' == final_endpoint:
+ return nn.Sequential(end_points)
+ else:
+ raise ValueError('Unknown convolution type %s for layer %d'
+ % (conv_def.ltype, i))
+ in_channels = out_channels
+ raise ValueError('Unknown final endpoint %s' % final_endpoint)
+
+class mobilenetv1(Network):
+ def __init__(self):
+ Network.__init__(self)
+ self._feat_stride = [16, ]
+ self._feat_compress = [1. / float(self._feat_stride[0]), ]
+ self._depth_multiplier = cfg.MOBILENET.DEPTH_MULTIPLIER
+ self._net_conv_channels = 512
+ self._fc7_channels = 1024
+
+ def init_weights(self):
+ def normal_init(m, mean, stddev, truncated=False):
+ """
+      weight initializer: truncated normal and random normal.
+ """
+ if m.__class__.__name__.find('Conv') == -1:
+ return
+ if truncated:
+ m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
+ else:
+ m.weight.data.normal_(mean, stddev)
+ if m.bias is not None: m.bias.data.zero_()
+
+ self.mobilenet.apply(lambda m: normal_init(m, 0, 0.09, True))
+ normal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)
+
+ def _image_to_head(self):
+ net_conv = self._layers['head'](self._image)
+ self._act_summaries['conv'] = net_conv
+
+ return net_conv
+
+ def _head_to_tail(self, pool5):
+ fc7 = self._layers['tail'](pool5)
+ fc7 = fc7.mean(3).mean(2)
+ return fc7
+
+ def _init_head_tail(self):
+ self.mobilenet = mobilenet_v1_base()
+
+ # Fix blocks
+ assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
+ for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
+ for p in m.parameters():
+ p.requires_grad = False
+
+ def set_bn_fix(m):
+ classname = m.__class__.__name__
+ if classname.find('BatchNorm') != -1:
+ for p in m.parameters(): p.requires_grad=False
+
+ self.mobilenet.apply(set_bn_fix)
+
+ # Add weight decay
+ def l2_regularizer(m, wd, regu_depth):
+ if m.__class__.__name__.find('Conv') != -1:
+ if regu_depth or m.groups == 1:
+ m.weight.weight_decay = wd
+ else:
+ m.weight.weight_decay = 0
+ self.mobilenet.apply(lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY, cfg.MOBILENET.REGU_DEPTH))
+
+ # Build mobilenet.
+ self._layers['head'] = nn.Sequential(*list(self.mobilenet.children())[:12])
+ self._layers['tail'] = nn.Sequential(*list(self.mobilenet.children())[12:])
+
+ def train(self, mode=True):
+ # Override train so that the training mode is set as we want
+ nn.Module.train(self, mode)
+ if mode:
+ # Set fixed blocks to be in eval mode (not really doing anything)
+ for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
+ m.eval()
+
+ # Set batchnorm always in eval mode during training
+ def set_bn_eval(m):
+ classname = m.__class__.__name__
+ if classname.find('BatchNorm') != -1:
+ m.eval()
+
+ self.mobilenet.apply(set_bn_eval)
+
+ def load_pretrained_cnn(self, state_dict):
+ print('Warning: No available pretrained model yet')
+ self.mobilenet.load_state_dict({k: state_dict['features.'+k] for k in list(self.mobilenet.state_dict())})
diff --git a/lib/nets/network.py b/lib/nets/network.py
new file mode 100644
index 0000000..c24f999
--- /dev/null
+++ b/lib/nets/network.py
@@ -0,0 +1,871 @@
+# --------------------------------------------------------
+# Tensorflow Faster R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from utils.visualization import draw_bounding_boxes
+
+from ops.roi_pool import RoIPool
+from ops.roi_align import RoIAlign
+from ops.roi_ring_pool import RoIRingPool
+
+from model.config import cfg
+from utils.bbox import bbox_overlaps
+import tensorboardX as tb
+
+from scipy.misc import imresize
+from sklearn.cluster import KMeans
+import random
+from torch.autograd import Variable
+
+import numpy.random as npr
+
+import math
+
+try:
+ xrange # Python 2
+except NameError:
+ xrange = range # Python 3
+
+
+class Network(nn.Module):
+ def __init__(self):
+ nn.Module.__init__(self)
+ self._predictions = {}
+ self._losses = {}
+ self._layers = {}
+ self._gt_image = None
+ self._event_summaries = {}
+ self._image_gt_summaries = {}
+ self._device = 'cuda'
+ self.RoIPool = RoIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1. / 16)
+ self.RoIAlign = RoIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1. / 16)
+ self.RoIRingPool = RoIRingPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1. / 16, 0., 1.0)
+ self.RoIRingPool_context = RoIRingPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1. / 16, 1.0, 1.8)
+ self.RoIRingPool_frame = RoIRingPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1. / 16, 1.0 / 1.8, 1.0)
+ self.aug_time = 4
+ self.ca_iw = True
+
+ def _add_gt_image(self):
+ # add back mean
+ image = self._image_gt_summaries['image'] + cfg.PIXEL_MEANS
+ image = imresize(image[0], self._im_info[:2] / self._im_info[2])
+ # BGR to RGB (opencv uses BGR)
+ self._gt_image = image[np.newaxis, :, :, ::-1].copy(order='C')
+
+ def _add_gt_image_summary(self):
+ # use a customized visualization function to visualize the boxes
+ self._add_gt_image()
+ image = draw_bounding_boxes(self._gt_image, self._image_gt_summaries['gt_boxes'], self._image_gt_summaries['im_info'])
+ return tb.summary.image('GROUND_TRUTH', image[0].astype('float32').swapaxes(1, 0).swapaxes(2, 0) / 255.0)
+
+ def _inverted_attention(self, bbox_feats_new, gt, keep_inds, refine_branch, step, fg_num, bg_num):
+ if step <= (cfg.TRAIN.STEP_ITERS + 10000):
+ fg_drop_per = 6
+ bg_drop_per = fg_drop_per * 3
+ th_l = 25
+ th_s = 16
+ else:
+ fg_drop_per = 5
+ bg_drop_per = fg_drop_per * 3
+ th_l = 29
+ th_s = 20
+
+ self.eval()
+ bbox_feats_new = Variable(bbox_feats_new.data, requires_grad=True)
+ bbox_feats_new_new = self._head_to_tail(bbox_feats_new)
+ if refine_branch == 1:
+ output_score = self.refine_net_1(bbox_feats_new_new)
+ elif refine_branch == 2:
+ output_score = self.refine_net_2(bbox_feats_new_new)
+ else:
+ print('no refine branch')
+ class_num = output_score.shape[1]
+ index = gt
+ num_rois = bbox_feats_new.shape[0]
+ num_channel = bbox_feats_new.shape[1]
+ one_hot = torch.zeros((1), dtype=torch.float32).cuda()
+ one_hot = Variable(one_hot, requires_grad=False)
+ sp_i = torch.ones([2, num_rois]).long()
+ sp_i[0, :] = torch.arange(num_rois)
+ sp_i[1, :] = torch.from_numpy(index)
+ sp_v = torch.ones([num_rois])
+ one_hot_sparse = torch.sparse.FloatTensor(sp_i, sp_v, torch.Size([num_rois, class_num])).to_dense().cuda()
+ one_hot_sparse = Variable(one_hot_sparse, requires_grad=False)
+ one_hot = torch.sum(output_score * one_hot_sparse)
+ self.vgg.classifier.zero_grad()
+ if refine_branch == 1:
+ self.refine_net_1.zero_grad()
+ elif refine_branch == 2:
+ self.refine_net_2.zero_grad()
+ else:
+ print('no refine branch')
+ one_hot.backward()
+ grads_val = bbox_feats_new.grad.clone().detach()
+ grad_channel_mean = torch.mean(grads_val.view(num_rois, num_channel, -1), dim=2)
+ grad_channel_mean = grad_channel_mean.view(num_rois, num_channel, 1, 1)
+ att_all = torch.sum(bbox_feats_new * grad_channel_mean, 1)
+ att_all = att_all.view(num_rois, 49)
+
+ self.vgg.classifier.zero_grad()
+ if refine_branch == 1:
+ self.refine_net_1.zero_grad()
+ elif refine_branch == 2:
+ self.refine_net_2.zero_grad()
+ else:
+ print('no refine branch')
+
+ thl_mask_value = torch.sort(att_all, dim=1, descending=True)[0][:, th_l]
+ thl_mask_value = thl_mask_value.view(num_rois, 1).expand(num_rois, 49)
+ mask_all_cuda = torch.where(att_all > thl_mask_value, torch.zeros(att_all.shape).cuda(), torch.ones(att_all.shape).cuda())
+ mask_all = mask_all_cuda.detach().cpu().numpy()
+ mask_all_new = np.ones((num_rois, 49), dtype=np.float32)
+ for q in keep_inds:
+ mask_all_temp = np.ones((49), dtype=np.float32)
+ zero_index = np.where(mask_all[q, :] == 0)[0]
+ num_zero_index = zero_index.size
+ if num_zero_index >= th_s:
+ dumy_index = npr.choice(zero_index, size=th_s, replace=False)
+ else:
+ zero_index = np.arange(49)
+ dumy_index = npr.choice(zero_index, size=th_s, replace=False)
+ mask_all_temp[dumy_index] = 0
+ mask_all_new[q, :] = mask_all_temp
+ mask_all = torch.from_numpy(mask_all_new.reshape(num_rois, 7, 7)).cuda()
+ mask_all = mask_all.view(num_rois, 1, 7, 7)
+
+ pooled_feat_before_after = torch.cat((bbox_feats_new, bbox_feats_new * mask_all), dim=0)
+ pooled_feat_before_after = self._head_to_tail(pooled_feat_before_after)
+ if refine_branch == 1:
+ cls_score_before_after = self.refine_net_1(pooled_feat_before_after)
+ elif refine_branch == 2:
+ cls_score_before_after = self.refine_net_2(pooled_feat_before_after)
+ else:
+ print('no refine branch')
+ cls_prob_before_after = F.softmax(cls_score_before_after, dim=1)
+ class_num = cls_prob_before_after.shape[1]
+ cls_prob_before = cls_prob_before_after[0: num_rois]
+ cls_prob_after = cls_prob_before_after[num_rois: num_rois * 2]
+ label_gt = torch.from_numpy(gt).cuda()
+ prepare_mask_fg_num = fg_num
+ prepare_mask_bg_num = bg_num
+ sp_i = torch.ones([2, num_rois]).long()
+ sp_i[0, :] = torch.arange(num_rois)
+ sp_i[1, :] = label_gt
+ sp_v = torch.ones([num_rois])
+ one_hot_sparse = torch.sparse.FloatTensor(sp_i, sp_v, torch.Size([num_rois, class_num])).to_dense().cuda()
+ before_vector = torch.sum(one_hot_sparse * cls_prob_before, dim=1)
+ after_vector = torch.sum(one_hot_sparse * cls_prob_after, dim=1)
+ change_vector = before_vector - after_vector - 0.02
+ change_vector = torch.where(change_vector > 0, change_vector, torch.zeros(change_vector.shape).cuda())
+ fg_index = torch.where(label_gt > 0, torch.ones(before_vector.shape).cuda(), torch.zeros(before_vector.shape).cuda())
+ bg_index = 1 - fg_index
+
+ if fg_index.nonzero().shape[0] != 0:
+ not_01_fg_index = fg_index.nonzero()[:, 0].long()
+ else:
+ not_01_fg_index = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).cuda().long() # for corner case
+ not_01_bg_index = bg_index.nonzero()[:, 0].long()
+ change_vector_fg = change_vector[not_01_fg_index]
+ change_vector_bg = change_vector[not_01_bg_index]
+ for_fg_change_vector = change_vector.clone()
+ for_bg_change_vector = change_vector.clone()
+ for_fg_change_vector[not_01_bg_index] = -10000
+ for_bg_change_vector[not_01_fg_index] = -10000
+
+ th_fg_value = torch.sort(change_vector_fg, dim=0, descending=True)[0][int(round(float(prepare_mask_fg_num) / fg_drop_per))]
+ drop_index_fg = for_fg_change_vector.gt(th_fg_value)
+ th_bg_value = torch.sort(change_vector_bg, dim=0, descending=True)[0][int(round(float(prepare_mask_bg_num) / bg_drop_per))]
+ drop_index_bg = for_bg_change_vector.gt(th_bg_value)
+ drop_index_fg_bg = drop_index_fg + drop_index_bg
+ ignore_index_fg_bg = 1 - drop_index_fg_bg
+ not_01_ignore_index_fg_bg = ignore_index_fg_bg.nonzero()[:, 0]
+ mask_all[not_01_ignore_index_fg_bg.long(), :] = 1
+ self.train()
+ return mask_all
+
+ def _normalize_atten_maps(self, atten_maps):
+ atten_shape = atten_maps.size()
+ batch_mins, _ = torch.min(atten_maps.view(atten_shape[0:-2] + (-1,)), dim=-1, keepdim=True)
+ batch_maxs, _ = torch.max(atten_maps.view(atten_shape[0:-2] + (-1,)), dim=-1, keepdim=True)
+ atten_normed = torch.div(atten_maps.view(atten_shape[0:-2] + (-1,))-batch_mins,
+ batch_maxs - batch_mins + 1e-7)
+ atten_normed = atten_normed.view(atten_shape)
+ return atten_normed
+
+ def _rampweight(self, iteration):
+ ramp_up_end = 45000
+ ramp_down_start = 100000
+ if (iteration < ramp_up_end):
+ ramp_weight = math.exp(-5 * math.pow((1 - iteration / ramp_up_end), 2))
+ elif (iteration > ramp_down_start):
+ ramp_weight = math.exp(-12.5 * math.pow((1 - (100000 - iteration) / 20000), 2))
+ else:
+ ramp_weight = 1
+
+ if (iteration <= 45000):
+ ramp_weight = 0
+ self.ca_iw = False
+ return ramp_weight
+
+ def _add_losses(self, roi_labels_1, keep_inds_1, roi_labels_2, keep_inds_2, step=None, rois=None):
+ det_cls_prob = self._predictions['det_cls_prob']
+ det_cls_prob = det_cls_prob.view(-1)
+ label = self._image_level_label.view(-1)
+ pi = self.ss_boxes_indexes.shape[0]
+ rampweight = self._rampweight(step)
+
+ refine_prob_1 = self._predictions['refine_prob_1']
+ refine_prob_2 = self._predictions['refine_prob_2']
+ # refine_prob_3 = self._predictions['refine_prob_3']
+
+    # calculating the loss of the first branch
+ roi_labels, keep_inds = roi_labels_1, keep_inds_1,
+ roi_labels_each = torch.tensor(roi_labels[0][keep_inds[0], :], dtype=torch.float32).cuda()
+ refine_loss_1 = - torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_1[keep_inds[0]]))) / roi_labels_each.shape[0]
+ roi_labels_each = torch.tensor(roi_labels[1][keep_inds[1], :], dtype=torch.float32).cuda()
+ refine_loss_1 -= torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_1[keep_inds[1] + pi]))) / roi_labels_each.shape[0]
+ roi_labels_each = torch.tensor(roi_labels[2][keep_inds[2], :], dtype=torch.float32).cuda()
+ refine_loss_1 -= torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_1[keep_inds[2] + pi * 2]))) / roi_labels_each.shape[0]
+ roi_labels_each = torch.tensor(roi_labels[3][keep_inds[3], :], dtype=torch.float32).cuda()
+ refine_loss_1 -= torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_1[keep_inds[3] + pi * 3]))) / roi_labels_each.shape[0]
+
+ consistency_conf_loss = 0
+ if self.ca_iw:
+ keep_inds_new = np.concatenate((keep_inds[0], keep_inds[0]+pi, keep_inds[0]+pi*2, keep_inds[0]+pi*3))
+ num_each = int(keep_inds_new.shape[0] / 4)
+ rois_self_attention_1 = torch.mean(rois[keep_inds_new], dim=1)
+ rois_self_attention_1 = torch.sigmoid(self._normalize_atten_maps(rois_self_attention_1))
+
+ rois_self_attention_gt_1 = rois_self_attention_1.clone().detach()
+ rois_self_attention_gt_1_1 = torch.max(rois_self_attention_gt_1[0:num_each], rois_self_attention_gt_1[num_each:num_each * 2].flip(dims=[2]))
+ rois_self_attention_gt_1_2 = torch.max(rois_self_attention_gt_1[num_each * 2:num_each * 3], rois_self_attention_gt_1[num_each * 3:num_each * 4].flip(dims=[2]))
+ rois_self_attention_gt_1 = torch.max(rois_self_attention_gt_1_1, rois_self_attention_gt_1_2)
+ consistency_conf_loss += F.mse_loss(rois_self_attention_1[0:num_each], rois_self_attention_gt_1)
+ consistency_conf_loss += F.mse_loss(rois_self_attention_1[num_each:num_each * 2], rois_self_attention_gt_1.flip(dims=[2]))
+ consistency_conf_loss += F.mse_loss(rois_self_attention_1[num_each * 2:num_each * 3], rois_self_attention_gt_1)
+ consistency_conf_loss += F.mse_loss(rois_self_attention_1[num_each * 3:num_each * 4], rois_self_attention_gt_1.flip(dims=[2]))
+
+    # calculating the loss of the second branch
+ roi_labels, keep_inds = roi_labels_2, keep_inds_2
+ roi_labels_each = torch.tensor(roi_labels[0][keep_inds[0], :], dtype=torch.float32).cuda()
+ refine_loss_2 = - torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_2[keep_inds[0]]))) / roi_labels_each.shape[0]
+ roi_labels_each = torch.tensor(roi_labels[1][keep_inds[1], :], dtype=torch.float32).cuda()
+ refine_loss_2 -= torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_2[keep_inds[1] + pi]))) / roi_labels_each.shape[0]
+ roi_labels_each = torch.tensor(roi_labels[2][keep_inds[2], :], dtype=torch.float32).cuda()
+ refine_loss_2 -= torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_2[keep_inds[2] + pi * 2]))) / roi_labels_each.shape[0]
+ roi_labels_each = torch.tensor(roi_labels[3][keep_inds[3], :], dtype=torch.float32).cuda()
+ refine_loss_2 -= torch.sum(torch.mul(roi_labels_each, torch.log(refine_prob_2[keep_inds[3] + pi * 3]))) / roi_labels_each.shape[0]
+
+ if self.ca_iw:
+ keep_inds_new = np.concatenate((keep_inds[0], keep_inds[0]+pi, keep_inds[0]+pi*2, keep_inds[0]+pi*3))
+ num_each = int(keep_inds_new.shape[0] / 4)
+ rois_self_attention_2 = torch.mean(rois[keep_inds_new], dim=1)
+ rois_self_attention_2 = torch.sigmoid(self._normalize_atten_maps(rois_self_attention_2))
+
+ rois_self_attention_gt_2 = rois_self_attention_2.clone().detach()
+ rois_self_attention_gt_2_1 = torch.max(rois_self_attention_gt_2[0:num_each], rois_self_attention_gt_2[num_each:num_each*2].flip(dims=[2]))
+ rois_self_attention_gt_2_2 = torch.max(rois_self_attention_gt_2[num_each*2:num_each*3], rois_self_attention_gt_2[num_each*3:num_each*4].flip(dims=[2]))
+ rois_self_attention_gt_2 = torch.max(rois_self_attention_gt_2_1, rois_self_attention_gt_2_2)
+ consistency_conf_loss += F.mse_loss(rois_self_attention_2[0:num_each], rois_self_attention_gt_2)
+ consistency_conf_loss += F.mse_loss(rois_self_attention_2[num_each:num_each*2], rois_self_attention_gt_2.flip(dims=[2]))
+ consistency_conf_loss += F.mse_loss(rois_self_attention_2[num_each*2:num_each*3], rois_self_attention_gt_2)
+ consistency_conf_loss += F.mse_loss(rois_self_attention_2[num_each*3:num_each*4], rois_self_attention_gt_2.flip(dims=[2]))
+ consistency_conf_loss /= num_each
+
+ label_new = torch.cat((label, label, label, label))
+ label_new = label_new.clone().detach().float().to(det_cls_prob.device)
+ zeros = torch.zeros(det_cls_prob.shape, dtype=det_cls_prob.dtype, device=det_cls_prob.device)
+ max_zeros = torch.max(zeros, 1 - torch.mul(label_new, det_cls_prob))
+ cls_det_loss = torch.sum(max_zeros)
+ loss = cls_det_loss / 20 + refine_loss_1 * 0.1 + refine_loss_2 * 0.1 + consistency_conf_loss * 0.1
+ loss /= float(self.aug_time)
+
+ self._losses['total_loss'] = loss
+ self._losses['cls_det_loss'] = cls_det_loss / 20
+ self._losses['refine_loss_1'] = refine_loss_1 * 0.1
+ self._losses['refine_loss_2'] = refine_loss_2 * 0.1
+ # self._losses['refine_loss_3'] = refine_loss_3
+ if self.ca_iw is False:
+ consistency_conf_loss = torch.zeros([1])
+ self._losses['consistency_loss'] = consistency_conf_loss
+ for k in self._losses.keys():
+ self._event_summaries[k] = self._losses[k]
+ return loss
+
+ def _region_classification_test(self, fc7_roi, fc7_context, fc7_frame):
+ refine_score_1 = self.refine_net_1(fc7_roi)
+ refine_score_2 = self.refine_net_2(fc7_roi)
+ # refine_score_3 = self.refine_net_3(fc7_roi)
+ cls_score = self.cls_score_net(fc7_roi)
+ context_score = self.det_score_net(fc7_context)
+ frame_score = self.det_score_net(fc7_frame)
+ det_score = frame_score - context_score
+
+ cls_prob = F.softmax(cls_score, dim=1)
+ det_prob = F.softmax(det_score, dim=0)
+ refine_prob_1 = F.softmax(refine_score_1, dim=1)
+ refine_prob_2 = F.softmax(refine_score_2, dim=1)
+ # refine_prob_3 = F.softmax(refine_score_3, dim=1)
+
+ det_cls_prob_product = torch.mul(cls_score, det_prob)
+ det_cls_prob = torch.sum(det_cls_prob_product, 0)
+ # bbox_pred = self.bbox_pred_net(fc7)
+ bbox_pred = torch.zeros(cls_prob.shape[0], 80)
+
+ self._predictions['refine_prob_1'] = refine_prob_1
+ self._predictions['refine_prob_2'] = refine_prob_2
+ # self._predictions['refine_prob_3'] = refine_prob_3
+ self._predictions["bbox_pred"] = bbox_pred
+ self._predictions['det_cls_prob_product'] = det_cls_prob_product
+ self._predictions['det_cls_prob'] = det_cls_prob
+ return cls_prob, det_prob, bbox_pred, det_cls_prob_product, det_cls_prob
+
+ def _region_classification_train(self, pool5_roi, fc7_roi, fc7_context, fc7_frame, step):
+ bbox_feats_new = pool5_roi.clone().detach()
+ fc7_roi_new = fc7_roi.clone().detach()
+ refine_score_1_new = self.refine_net_1(fc7_roi_new)
+ refine_prob_1_new = F.softmax(refine_score_1_new, dim=1)
+
+ cls_score = self.cls_score_net(fc7_roi)
+ context_score = self.det_score_net(fc7_context)
+ frame_score = self.det_score_net(fc7_frame)
+ det_score = frame_score - context_score
+
+ cls_prob = F.softmax(cls_score, dim=1)
+ pi = self.ss_boxes_indexes.shape[0]
+ if self._mode == 'TRAIN':
+ ss_rois_num_each = int(cls_score.shape[0] / 4)
+ assert ss_rois_num_each == pi
+ det_prob1 = F.softmax(det_score[ss_rois_num_each * 0: ss_rois_num_each * 1], dim=0)
+ det_prob2 = F.softmax(det_score[ss_rois_num_each * 1: ss_rois_num_each * 2], dim=0)
+ det_prob3 = F.softmax(det_score[ss_rois_num_each * 2: ss_rois_num_each * 3], dim=0)
+ det_prob4 = F.softmax(det_score[ss_rois_num_each * 3: ss_rois_num_each * 4], dim=0)
+ det_prob = torch.cat((det_prob1, det_prob2, det_prob3, det_prob4))
+ else:
+ det_prob = F.softmax(det_score, dim=0)
+
+ det_cls_prob_product = torch.mul(cls_score, det_prob)
+ if self._mode == 'TRAIN':
+ ss_rois_num_each = int(det_cls_prob_product.shape[0] / 4)
+ assert ss_rois_num_each == pi
+ det_cls_prob1 = torch.sum(det_cls_prob_product[ss_rois_num_each * 0: ss_rois_num_each * 1], 0)
+ det_cls_prob2 = torch.sum(det_cls_prob_product[ss_rois_num_each * 1: ss_rois_num_each * 2], 0)
+ det_cls_prob3 = torch.sum(det_cls_prob_product[ss_rois_num_each * 2: ss_rois_num_each * 3], 0)
+ det_cls_prob4 = torch.sum(det_cls_prob_product[ss_rois_num_each * 3: ss_rois_num_each * 4], 0)
+ det_cls_prob = torch.stack([det_cls_prob1, det_cls_prob2, det_cls_prob3, det_cls_prob4])
+ det_cls_prob_product2 = torch.mul(cls_prob, det_prob)
+ else:
+ det_cls_prob = torch.sum(det_cls_prob_product, 0)
+ det_cls_prob_product2 = torch.mul(cls_prob, det_prob)
+
+ # bbox_pred = self.bbox_pred_net(fc7)
+ bbox_pred = torch.zeros(cls_prob.shape[0], 80)
+ self._predictions["bbox_pred"] = bbox_pred
+ self._predictions['det_cls_prob'] = det_cls_prob
+ self._predictions['det_cls_prob_product'] = det_cls_prob_product2
+
+ roi_labels_1, keep_inds_1, fg_num_1, bg_num_1 = get_refine_supervision_ac4_IA(det_cls_prob_product2,
+ self._image_gt_summaries['ss_boxes_input'][0],
+ self._image_gt_summaries['image_level_label'],
+ self._im_info)
+ roi_labels_1_new = np.vstack((roi_labels_1[0], roi_labels_1[1], roi_labels_1[2], roi_labels_1[3]))
+ keep_inds_1_new = np.concatenate((keep_inds_1[0], keep_inds_1[1] + pi, keep_inds_1[2] + pi*2, keep_inds_1[3] + pi*3))
+ fg_num_1_new = fg_num_1[0] + fg_num_1[1] + fg_num_1[2] + fg_num_1[3]
+ bg_num_1_new = bg_num_1[0] + bg_num_1[1] + bg_num_1[2] + bg_num_1[3]
+
+ roi_labels_2, keep_inds_2, fg_num_2, bg_num_2 = get_refine_supervision_ac4_IA(refine_prob_1_new,
+ self._image_gt_summaries['ss_boxes_input'][0],
+ self._image_gt_summaries['image_level_label'],
+ self._im_info)
+ roi_labels_2_new = np.vstack((roi_labels_2[0], roi_labels_2[1], roi_labels_2[2], roi_labels_2[2]))
+ keep_inds_2_new = np.concatenate((keep_inds_2[0], keep_inds_2[1] + pi, keep_inds_2[2] + pi*2, keep_inds_2[3] + pi*3))
+ fg_num_2_new = fg_num_2[0] + fg_num_2[1] + fg_num_2[2] + fg_num_2[3]
+ bg_num_2_new = bg_num_2[0] + bg_num_2[1] + bg_num_2[2] + bg_num_2[3]
+
+ self.eval()
+ gt = np.argmax(roi_labels_1_new, axis=1)
+ mask_1 = self._inverted_attention(bbox_feats_new, gt, keep_inds_1_new, 1, step, fg_num_1_new, bg_num_1_new)
+ gt = np.argmax(roi_labels_2_new, axis=1)
+ mask_2 = self._inverted_attention(bbox_feats_new, gt, keep_inds_2_new, 2, step, fg_num_2_new, bg_num_2_new)
+ self.train()
+
+ mask_1 = Variable(mask_1, requires_grad=True)
+ mask_2 = Variable(mask_2, requires_grad=True)
+ pool5_roi_1 = pool5_roi * mask_1
+ pool5_roi_2 = pool5_roi * mask_2
+ fc7_roi_1 = self._head_to_tail(pool5_roi_1)
+ fc7_roi_2 = self._head_to_tail(pool5_roi_2)
+ refine_score_1 = self.refine_net_1(fc7_roi_1)
+ refine_score_2 = self.refine_net_2(fc7_roi_2)
+
+ refine_prob_1 = F.softmax(refine_score_1, dim=1)
+ refine_prob_2 = F.softmax(refine_score_2, dim=1)
+ # refine_prob_3 = F.softmax(refine_score_3, dim=1)
+
+ self._predictions['refine_prob_1'] = refine_prob_1
+ self._predictions['refine_prob_2'] = refine_prob_2
+ # self._predictions['refine_prob_3'] = refine_prob_3
+ return roi_labels_1, keep_inds_1, roi_labels_2, keep_inds_2, bbox_pred
+
+ def _image_to_head(self):
+ raise NotImplementedError
+
+ def _head_to_tail(self, pool5):
+ raise NotImplementedError
+
+ def create_architecture(self, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
+ self._tag = tag
+ self._num_classes = num_classes
+ self._anchor_scales = anchor_scales
+ self._num_scales = len(anchor_scales)
+ self._anchor_ratios = anchor_ratios
+ self._num_ratios = len(anchor_ratios)
+ self._num_anchors = self._num_scales * self._num_ratios
+ assert tag != None
+ # Initialize layers
+ self._init_modules()
+
+ def _init_modules(self):
+ self._init_head_tail()
+ self.cls_score_net = nn.Linear(self._fc7_channels, self._num_classes)
+ self.det_score_net = nn.Linear(self._fc7_channels, self._num_classes)
+ #self.bbox_pred_net = nn.Linear(self._fc7_channels, self._num_classes)
+ self.bbox_pred_net = nn.Linear(self._fc7_channels, self._num_classes * 4)
+ self.refine_net_1 = nn.Linear(self._fc7_channels, self._num_classes + 1)
+ self.refine_net_2 = nn.Linear(self._fc7_channels, self._num_classes + 1)
+ # self.refine_net_3 = nn.Linear(self._fc7_channels, self._num_classes + 1)
+ # self.theta = nn.Conv2d(in_channels=512, out_channels=256,
+ # kernel_size=1, stride=1, padding=0)
+ # self.phi = nn.Conv2d(in_channels=512, out_channels=256,
+ # kernel_size=1, stride=1, padding=0)
+ # self.g = nn.Conv2d(in_channels=512, out_channels=256,
+ # kernel_size=1, stride=1, padding=0)
+ # self.W = nn.Conv2d(in_channels=256, out_channels=512,
+ # kernel_size=1, stride=1, padding=0)
+ # nn.init.constant_(self.W.weight, 0)
+ # nn.init.constant_(self.W.bias, 0)
+ self.init_weights()
+
+ def _run_summary_op(self, val=False):
+ """
+    Run the summary operator: feed the placeholders with corresponding network outputs (activations)
+ """
+ summaries = []
+ # Add image gt
+ summaries.append(self._add_gt_image_summary())
+ # Add event_summaries
+ for key, var in self._event_summaries.items(): # __event_summaries is equal to loss itmes
+ summaries.append(tb.summary.scalar(key, var.item()))
+ self._event_summaries = {}
+ return summaries
+
+ def _predict_train(self, ss_boxes, step):
+ """Training-time forward pass over two images, each with an original and a
+ flipped view (4 views total). ss_boxes is a list of two arrays, each holding
+ the proposal boxes for the two views of one image.
+ """
+ # Variable input sizes: disable cudnn benchmarking for the backbone pass.
+ torch.backends.cudnn.benchmark = False
+ net_conv = self._image_to_head()
+ i = 0
+ rois = torch.from_numpy(ss_boxes[i]).to(self._device)
+ # Three parallel poolings per view: ROI interior, surrounding context, and frame ring.
+ pool5_roi_0 = self.RoIRingPool(net_conv[i][0:1, :], rois[0, :])
+ pool5_context_0 = self.RoIRingPool_context(net_conv[i][0:1, :], rois[0, :])
+ pool5_frame_0 = self.RoIRingPool_frame(net_conv[i][0:1, :], rois[0, :])
+ pool5_roi_flip_0 = self.RoIRingPool(net_conv[i][1:2, :], rois[1, :])
+ pool5_context_flip_0 = self.RoIRingPool_context(net_conv[i][1:2, :], rois[1, :])
+ pool5_frame_flip_0 = self.RoIRingPool_frame(net_conv[i][1:2, :], rois[1, :])
+ i = 1
+ rois = torch.from_numpy(ss_boxes[i]).to(self._device)
+ pool5_roi_1 = self.RoIRingPool(net_conv[i][0:1, :], rois[0, :])
+ pool5_context_1 = self.RoIRingPool_context(net_conv[i][0:1, :], rois[0, :])
+ pool5_frame_1 = self.RoIRingPool_frame(net_conv[i][0:1, :], rois[0, :])
+ pool5_roi_flip_1 = self.RoIRingPool(net_conv[i][1:2, :], rois[1, :])
+ pool5_context_flip_1 = self.RoIRingPool_context(net_conv[i][1:2, :], rois[1, :])
+ pool5_frame_flip_1 = self.RoIRingPool_frame(net_conv[i][1:2, :], rois[1, :])
+
+ # Stack the four views so the heads see them as one batch.
+ pool5_roi = torch.cat((pool5_roi_0, pool5_roi_flip_0, pool5_roi_1, pool5_roi_flip_1))
+ pool5_context = torch.cat((pool5_context_0, pool5_context_flip_0, pool5_context_1, pool5_context_flip_1))
+ pool5_frame = torch.cat((pool5_frame_0, pool5_frame_flip_0, pool5_frame_1, pool5_frame_flip_1))
+
+ if self._mode == 'TRAIN':
+ torch.backends.cudnn.benchmark = True # benchmark because now the input size are fixed
+ fc7_roi = self._head_to_tail(pool5_roi)
+ fc7_context = self._head_to_tail(pool5_context)
+ fc7_frame = self._head_to_tail(pool5_frame)
+ # NOTE(review): when ca_iw is set, the pooled ROI *features* (not boxes) are
+ # returned as "rois" — presumably consumed by _add_losses; confirm intent.
+ if self.ca_iw:
+ rois = pool5_roi
+ else:
+ rois = None
+
+ roi_labels_1, keep_inds_1, \
+ roi_labels_2, keep_inds_2, bbox_pred = self._region_classification_train(pool5_roi, fc7_roi,fc7_context, fc7_frame, step)
+ return roi_labels_1, keep_inds_1, roi_labels_2, keep_inds_2, bbox_pred, rois
+
+ def _predict_test(self, ss_boxes):
+ """Test-time forward pass over a single image with one set of proposals."""
+ torch.backends.cudnn.benchmark = False
+ net_conv = self._image_to_head()
+ ss_rois = torch.from_numpy(ss_boxes).to(self._device)
+ rois = ss_rois
+ self._predictions["rois"] = rois
+ # Same three pooling streams as training, on the single view.
+ pool5_roi = self.RoIRingPool(net_conv, rois)
+ pool5_context = self.RoIRingPool_context(net_conv, rois)
+ pool5_frame = self.RoIRingPool_frame(net_conv, rois)
+
+ if self._mode == 'TRAIN':
+ torch.backends.cudnn.benchmark = True # benchmark because now the input size are fixed
+ fc7_roi = self._head_to_tail(pool5_roi)
+ fc7_context = self._head_to_tail(pool5_context)
+ fc7_frame = self._head_to_tail(pool5_frame)
+
+ cls_prob, det_prob, bbox_pred, cls_det_prob_product, det_cls_prob = self._region_classification_test(fc7_roi, fc7_context, fc7_frame)
+ return rois, cls_prob, det_prob, bbox_pred, cls_det_prob_product, det_cls_prob
+
+ def forward(self, image, image_level_label, im_info, gt_boxes=None, ss_boxes=None, step=None, mode='TRAIN'):
+ """Dispatch to the TEST path (single image) or the TRAIN path (two images,
+ each with an original + flipped view). TRAIN computes and stores losses;
+ TEST stores de-normalized box predictions in self._predictions.
+ """
+ self._image_gt_summaries['image'] = image
+ self._image_gt_summaries['image_level_label'] = image_level_label
+ self._image_gt_summaries['gt_boxes'] = gt_boxes
+ self._image_gt_summaries['im_info'] = im_info
+ self._mode = mode
+ self._im_info = im_info
+ self._image_level_label = torch.from_numpy(image_level_label) if image_level_label is not None else None
+
+ if mode == 'TEST':
+ self._image_gt_summaries['ss_boxes'] = ss_boxes
+ self._image = torch.from_numpy(image.transpose([0, 3, 1, 2]).copy()).to(self._device)
+ self._gt_boxes = torch.from_numpy(gt_boxes).to(self._device) if gt_boxes is not None else None
+ self.ss_boxes_indexes = self.return_ss_boxes(np.arange(ss_boxes.shape[0]), mode)
+ rois, cls_prob, det_prob, bbox_pred, cls_det_prob_product, det_cls_prob = self._predict_test(ss_boxes[self.ss_boxes_indexes, :])
+ # NOTE(review): 80 looks hard-coded as num_classes * 4 (20 classes) — confirm
+ # this matches self._num_classes before reusing on other datasets.
+ bbox_pred = bbox_pred[:, :80]
+ # Undo the bbox target normalization applied during training.
+ stds = bbox_pred.data.new(cfg.TRAIN.BBOX_NORMALIZE_STDS).repeat(self._num_classes).unsqueeze(0).expand_as(bbox_pred)
+ means = bbox_pred.data.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS).repeat(self._num_classes).unsqueeze(0).expand_as(bbox_pred)
+ self._predictions["bbox_pred"] = bbox_pred.mul(stds).add(means)
+ else:
+ ss_boxes_all = []
+ self._image = []
+ self._image_gt_summaries['ss_boxes_input'] = []
+ self._image_level_label = torch.from_numpy(image_level_label) if image_level_label is not None else None
+ # Sub-sample the same proposal indices for both images so views stay aligned.
+ self.ss_boxes_indexes = self.return_ss_boxes(np.arange(ss_boxes[0].shape[0]), mode)
+ for i in range(2):
+ image_org = torch.from_numpy(image[i].transpose([0, 3, 1, 2]).copy()).to(self._device)
+ self._image.append(image_org)
+ # Pair the original (2i) and flipped (2i+1) proposal sets for image i.
+ ss_boxes_input = np.stack((ss_boxes[i * 2], ss_boxes[i * 2 + 1]))
+ ss_boxes_all.append(ss_boxes_input[:, self.ss_boxes_indexes, :])
+ self._image_gt_summaries['ss_boxes_input'] = ss_boxes_all
+
+ roi_labels_1, keep_inds_1, roi_labels_2, keep_inds_2, bbox_pred, rois = self._predict_train(ss_boxes_all, step)
+ bbox_pred = bbox_pred[:, :80]
+ self._add_losses(roi_labels_1, keep_inds_1, roi_labels_2, keep_inds_2, step=step, rois=rois)
+
+ def return_ss_boxes(self, boxes_index, mode='TRAIN'):
+ # TEST keeps all proposals; TRAIN randomly samples at most 1000 of them.
+ if mode == 'TEST':
+ return boxes_index
+ box_num = min(1000, len(boxes_index))
+ indexes = np.random.choice(boxes_index, size=box_num, replace=False)
+ return indexes
+
+ def init_weights(self):
+ # Initialize every linear head with a (possibly truncated) normal distribution.
+ def normal_init(m, mean, stddev, truncated=False):
+ """
+ weight initializer: truncated normal and random normal.
+ """
+ if truncated:
+ m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
+ else:
+ m.weight.data.normal_(mean, stddev)
+ m.bias.data.zero_()
+
+ # normal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.det_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)
+ normal_init(self.refine_net_1, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ normal_init(self.refine_net_2, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init(self.refine_net_3, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init1(self.theta, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init1(self.phi, 0, 0.01, cfg.TRAIN.TRUNCATED)
+ # normal_init1(self.g, 0, 0.01, cfg.TRAIN.TRUNCATED)
+
+ # Extract the head feature maps, for example for vgg16 it is conv5_3
+ # only useful during testing mode
+ def extract_head(self, image):
+ # Run only the backbone on a raw NHWC numpy image; returns the conv feature map.
+ feat = self._layers["head"](torch.from_numpy(image.transpose([0, 3, 1, 2])).to(self._device))
+ return feat
+
+ # only useful during testing mode
+ def test_image(self, image, im_info, ss_boxes):
+ """Inference on one image: run TEST forward under no_grad and return numpy
+ predictions. The refine probabilities are returned without their background
+ column ([:, 1:]).
+ """
+ self.eval()
+ with torch.no_grad():
+ self.forward(image, None, im_info, None, ss_boxes, mode='TEST')
+
+ bbox_pred, rois, det_cls_prob, det_cls_prob_product, refine_prob_1, refine_prob_2 = \
+ self._predictions['bbox_pred'].data.cpu().numpy(), \
+ self._predictions['rois'].data.cpu().numpy(), \
+ self._predictions['det_cls_prob'].data.cpu().numpy(), \
+ self._predictions['det_cls_prob_product'].data.cpu().numpy(), \
+ self._predictions['refine_prob_1'].data.cpu().numpy(), \
+ self._predictions['refine_prob_2'].data.cpu().numpy()
+
+ return bbox_pred, rois, det_cls_prob, det_cls_prob_product, refine_prob_1[:, 1:], refine_prob_2[:, 1:]
+
+ def delete_intermediate_states(self):
+ # Delete intermediate result to save memory
+ for d in [self._losses, self._predictions]:
+ for k in list(d):
+ del d[k]
+
+ def get_summary(self, blobs, step=None):
+ # Run a forward pass in eval mode just to populate summaries, then restore
+ # train mode. NOTE(review): forward is called with the default mode='TRAIN'
+ # here even though self.eval() is set — confirm this is intentional.
+ self.eval()
+ self.forward(blobs['data'], blobs['image_level_labels'], blobs['im_info'], blobs['gt_boxes'], blobs['ss_boxes'], step)
+ self.train()
+ summary = self._run_summary_op(True)
+ return summary
+
+ def train_step(self, blobs, train_op):
+ """One optimization step: forward, read the individual losses, backprop the
+ total loss, and step the optimizer. Returns the scalar loss values.
+ """
+ self.forward(blobs['data'], blobs['image_level_labels'], blobs['im_info'], blobs['gt_boxes'], blobs['ss_boxes'], step)
+ # Snapshot scalar losses before delete_intermediate_states() clears them.
+ cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, loss = self._losses['cls_det_loss'].item(), \
+ self._losses['refine_loss_1'].item(), \
+ self._losses['refine_loss_2'].item(), \
+ self._losses['consistency_loss'].item(), \
+ self._losses['total_loss'].item()
+ train_op.zero_grad()
+ self._losses['total_loss'].backward()
+ train_op.step()
+
+ self.delete_intermediate_states()
+ #torch.cuda.empty_cache()
+
+ return cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, loss
+
+ def train_step_with_summary(self, blobs, train_op, step):
+ """Same as train_step but additionally returns a summary value.
+ The real summary op is currently disabled and 0 is returned in its place.
+ """
+ self.forward(blobs['data'], blobs['image_level_labels'], blobs['im_info'], blobs['gt_boxes'], blobs['ss_boxes'], step)
+ # Snapshot scalar losses before delete_intermediate_states() clears them.
+ cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, loss = self._losses["cls_det_loss"].item(), \
+ self._losses['refine_loss_1'].item(), \
+ self._losses['refine_loss_2'].item(), \
+ self._losses['consistency_loss'].item(), \
+ self._losses['total_loss'].item()
+ train_op.zero_grad()
+ self._losses['total_loss'].backward()
+ train_op.step()
+ # summary = self._run_summary_op()
+ summary = 0
+ self.delete_intermediate_states()
+ return cls_det_loss, refine_loss_1, refine_loss_2, consistency_loss, loss, summary
+
+
+ def train_step_no_return(self, blobs, train_op):
+ self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], blobs['ss_boxes'])
+ train_op.zero_grad()
+ self._losses['total_loss'].backward()
+ train_op.step()
+ self.delete_intermediate_states()
+
+ def load_state_dict(self, state_dict):
+ """
+ Because we remove the definition of fc layer in resnet now, it will fail when loading
+ the model trained before.
+ To provide back compatibility, we overwrite the load_state_dict
+ """
+ # Keep only the keys this model actually declares; extra checkpoint keys are dropped.
+ nn.Module.load_state_dict(self, {k: state_dict[k] for k in list(self.state_dict())})
+
+# ----------------------------------------------------------------------------------------------------------------------
+def _get_top_ranking_propoals(probs):
+ """Get top ranking proposals by k-means: cluster scores into 3 groups and
+ keep the indices belonging to the highest-centroid cluster.
+ probs: (num_box, 1) scores. Returns an index array into probs.
+ """
+ kmeans = KMeans(n_clusters=3, random_state=3).fit(probs)
+ high_score_label = np.argmax(kmeans.cluster_centers_)
+ index = np.where(kmeans.labels_ == high_score_label)[0]
+ # Defensive fallback: if the winning cluster is somehow empty, keep the argmax.
+ if len(index) == 0:
+ index = np.array([np.argmax(probs)])
+ return index
+
+
+def _build_graph(boxes, iou_threshold):
+ """Build graph based on box IoU: adjacency matrix (N, N) with 1 where
+ pairwise IoU exceeds iou_threshold (diagonal is 1 since self-IoU is 1)."""
+ overlaps = bbox_overlaps(
+ boxes.astype(dtype=np.float32, copy=False),
+ boxes.astype(dtype=np.float32, copy=False))
+ return (overlaps > iou_threshold).astype(np.float32)
+
+
+def _get_graph_centers(boxes, cls_prob, im_labels):
+ """Get graph centers: for each image-level class, cluster the top-scoring
+ proposals by IoU and pick up to 5 cluster centers as pseudo ground-truth
+ boxes. Returns {'gt_boxes', 'gt_classes', 'gt_scores'} (classes are 1-based;
+ 0 is background).
+ """
+ num_images, num_classes = im_labels.shape
+ assert num_images == 1, 'batch size shoud be equal to 1'
+ im_labels_tmp = im_labels[0, :].copy()
+ gt_boxes = np.zeros((0, 4), dtype=np.float32)
+ gt_classes = np.zeros((0, 1), dtype=np.int32)
+ gt_scores = np.zeros((0, 1), dtype=np.float32)
+ # NOTE(review): xrange is Python 2 — presumably `from six.moves import xrange`
+ # (or similar) exists at the top of this file; confirm for Python 3.
+ for i in xrange(num_classes):
+ if im_labels_tmp[i] == 1:
+ cls_prob_tmp = cls_prob[:, i].copy()
+ idxs = np.where(cls_prob_tmp >= 0)[0]
+ if idxs.shape[0] == 0:
+ print('kmeans problem')
+ continue
+ # Keep only the high-score cluster of proposals for this class.
+ idxs_tmp = _get_top_ranking_propoals(cls_prob_tmp[idxs].reshape(-1, 1))
+ idxs = idxs[idxs_tmp]
+ boxes_tmp = boxes[idxs, :].copy()
+ cls_prob_tmp = cls_prob_tmp[idxs]
+ graph = _build_graph(boxes_tmp, 0.4)
+
+ keep_idxs = []
+ gt_scores_tmp = []
+ count = cls_prob_tmp.size
+ # Greedy graph clustering: repeatedly take the node with most neighbours,
+ # record the best score in its neighbourhood, and remove that cluster.
+ while True:
+ order = np.sum(graph, axis=1).argsort()[::-1]
+ tmp = order[0]
+ keep_idxs.append(tmp)
+ inds = np.where(graph[tmp, :] > 0)[0]
+ gt_scores_tmp.append(np.max(cls_prob_tmp[inds]))
+
+ graph[:, inds] = 0
+ graph[inds, :] = 0
+ count = count - len(inds)
+ if count <= 5:
+ break
+
+ gt_boxes_tmp = boxes_tmp[keep_idxs, :].copy()
+ gt_scores_tmp = np.array(gt_scores_tmp).copy()
+
+ # Top-5 centers by score, highest first.
+ keep_idxs_new = np.argsort(gt_scores_tmp)[-1:(-1 - min(len(gt_scores_tmp), 5)):-1]
+
+ gt_boxes = np.vstack((gt_boxes, gt_boxes_tmp[keep_idxs_new, :]))
+ gt_scores = np.vstack((gt_scores,gt_scores_tmp[keep_idxs_new].reshape(-1, 1)))
+ gt_classes = np.vstack((gt_classes,(i + 1) * np.ones((len(keep_idxs_new), 1), dtype=np.int32)))
+
+ # If a proposal is chosen as a cluster center,
+ # we simply delete a proposal from the candidate proposal pool,
+ # because we found that the results of different strategies are similar and this strategy is more efficient
+ cls_prob = np.delete(cls_prob.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)
+ boxes = np.delete(boxes.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)
+
+ proposals = {'gt_boxes': gt_boxes,
+ 'gt_classes': gt_classes,
+ 'gt_scores': gt_scores}
+
+ return proposals
+
+
+def _get_proposal_clusters(all_rois, proposals, im_labels):
+ """Generate a random sample of RoIs comprising foreground and background
+ examples.
+
+ In its current (trimmed) form this only assigns each RoI to its best-matching
+ pseudo ground-truth box and returns (max_overlaps, labels); the sampling and
+ per-cluster weighting code below is kept commented out for reference.
+ """
+ num_images, num_classes = im_labels.shape
+ assert num_images == 1, 'batch size shoud be equal to 1'
+ # overlaps: (rois x gt_boxes)
+ gt_boxes = proposals['gt_boxes']
+ gt_labels = proposals['gt_classes']
+ #gt_scores = proposals['gt_scores']
+ overlaps = bbox_overlaps(
+ all_rois.astype(dtype=np.float32, copy=False),
+ gt_boxes.astype(dtype=np.float32, copy=False))
+ # Best-matching pseudo-GT per RoI, and the IoU with it.
+ gt_assignment = overlaps.argmax(axis=1)
+ max_overlaps = overlaps.max(axis=1)
+ labels = gt_labels[gt_assignment, 0]
+ # cls_loss_weights = gt_scores[gt_assignment, 0]
+
+ # # Select foreground RoIs as those with >= FG_THRESH overlap
+ # fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
+ #
+ # # Select background RoIs as those with < FG_THRESH overlap
+ # bg_inds = np.where(max_overlaps < cfg.TRAIN.FG_THRESH)[0]
+ #
+ # ig_inds = np.where(max_overlaps < cfg.TRAIN.BG_THRESH)[0]
+
+ # cls_loss_weights[ig_inds] = 0.0
+ #
+ # labels[bg_inds] = 0
+ # gt_assignment[bg_inds] = -1
+ #
+ # img_cls_loss_weights = np.zeros(gt_boxes.shape[0], dtype=np.float32)
+ # pc_probs = np.zeros(gt_boxes.shape[0], dtype=np.float32)
+ # pc_labels = np.zeros(gt_boxes.shape[0], dtype=np.int32)
+ # pc_count = np.zeros(gt_boxes.shape[0], dtype=np.int32)
+ #
+ # for i in xrange(gt_boxes.shape[0]):
+ # po_index = np.where(gt_assignment == i)[0]
+ # img_cls_loss_weights[i] = np.sum(cls_loss_weights[po_index])
+ # pc_labels[i] = gt_labels[i, 0]
+ # pc_count[i] = len(po_index)
+ # pc_probs[i] = np.average(cls_prob[po_index, pc_labels[i]])
+ return max_overlaps, labels
+
+
+def get_refine_supervision_ac4_IA(refine_prob, ss_boxes, image_level_label, im_info=None):
+ '''
+ Build refinement supervision by averaging the refine probabilities of the
+ four views and mining pseudo ground truth via graph centers.
+ refine_prob: num_box x 20 or num_box x 21
+ ss_boxes: num_box x 4
+ image_level_label: 1 dim vector with 20 elements
+ Returns (roi_labels_list, keep_inds_list, fg_len, bg_len), one entry per
+ sampled view (4 samples of the same labels/indices).
+ '''
+ keep_inds_list = []
+ roi_labels_list = []
+ pi = ss_boxes.shape[1]
+ fg_len = []
+ bg_len = []
+
+ # Average the predictions of the 4 stacked views (2 images x orig/flip).
+ refine_prob_each = (refine_prob[0:pi, :] + refine_prob[pi:pi*2, :] + refine_prob[pi*2:pi*3, :] + refine_prob[pi*3:pi*4, :]) / 4.0
+ for i in range(1):
+ ss_boxes_each = ss_boxes[i, :]
+
+ cls_prob = refine_prob_each.data.cpu().numpy()
+ boxes = ss_boxes_each[:, 1:].copy()
+
+ # Drop the background column when the head predicts num_classes + 1.
+ if refine_prob.shape[1] == image_level_label.shape[1] + 1:
+ cls_prob = cls_prob[:, 1:]
+ roi_labels = np.zeros([pi, image_level_label.shape[1] + 1], dtype=np.int32)
+ roi_labels[:, 0] = 1 # the 0th elements is the bg
+ roi_weights = np.zeros((pi, 1), dtype=np.float32) # num_box x 1 weights of the rois
+
+ # Clamp probabilities away from 0/1 for numerical stability downstream.
+ eps = 1e-9
+ cls_prob[cls_prob < eps] = eps
+ cls_prob[cls_prob > 1 - eps] = 1 - eps
+ proposals = _get_graph_centers(boxes.copy(), cls_prob.copy(), image_level_label.copy())
+ # proposals_list.append(proposals)
+
+ max_overlaps, labels = _get_proposal_clusters(boxes.copy(), proposals, image_level_label.copy())
+
+ fg_inds = np.where(max_overlaps > cfg.TRAIN.MIL_FG_THRESH)[0]
+
+ # One-hot labels: mark the assigned class and clear the background bit for fg.
+ roi_labels[fg_inds, labels[fg_inds]] = 1
+ roi_labels[fg_inds, 0] = 0
+
+ bg_inds = (np.array(max_overlaps >= cfg.TRAIN.MIL_BG_THRESH_LO, dtype=np.int32) + np.array(
+ max_overlaps < cfg.TRAIN.MIL_BG_THRESH_HI, dtype=np.int32) == 2).nonzero()[0]
+
+ # Draw 4 independent fg/bg samples (one per view).
+ for m in range(4):
+ if len(fg_inds) > 0 and len(bg_inds) > 0:
+ fg_rois_num = min(cfg.TRAIN.MIL_NUM_FG, len(fg_inds))
+ fg_inds_tmp = fg_inds[np.random.choice(np.arange(0, len(fg_inds)), size=int(fg_rois_num), replace=False)]
+ bg_rois_num = min(cfg.TRAIN.MIL_NUM_BG, len(bg_inds))
+ bg_inds_tmp = bg_inds[np.random.choice(np.arange(0, len(bg_inds)), size=int(bg_rois_num), replace=False)]
+ elif len(fg_inds) > 0:
+ fg_rois_num = min(cfg.TRAIN.MIL_NUM_FG, len(fg_inds))
+ fg_inds_tmp = fg_inds[np.random.choice(np.arange(0, len(fg_inds)), size=int(fg_rois_num), replace=False)]
+ bg_inds_tmp = bg_inds
+ elif len(bg_inds) > 0:
+ bg_rois_num = min(cfg.TRAIN.MIL_NUM_BG, len(bg_inds))
+ bg_inds_tmp = bg_inds[np.random.choice(np.arange(0, len(bg_inds)), size=int(bg_rois_num), replace=False)]
+ fg_inds_tmp = fg_inds
+ else:
+ # NOTE(review): if both fg and bg are empty this drops into the debugger
+ # and fg_inds_tmp/bg_inds_tmp stay undefined — handle explicitly if this
+ # can happen outside debugging.
+ import pdb
+ pdb.set_trace()
+
+ for n in range(1):
+ keep_inds = np.concatenate([fg_inds_tmp, bg_inds_tmp])
+ keep_inds_list.append(keep_inds)
+ #roi_labels_list.append(roi_labels[keep_inds, :])
+ roi_labels_list.append(roi_labels)
+ fg_len.append(len(fg_inds_tmp))
+ bg_len.append(len(bg_inds_tmp))
+
+ return roi_labels_list, keep_inds_list, fg_len, bg_len
+
diff --git a/lib/nets/resnet_v1.py b/lib/nets/resnet_v1.py
new file mode 100644
index 0000000..d051bb7
--- /dev/null
+++ b/lib/nets/resnet_v1.py
@@ -0,0 +1,186 @@
+# --------------------------------------------------------
+# Tensorflow Faster R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Zheqi He and Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from nets.network import Network
+from model.config import cfg
+
+import utils.timer
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Variable
+import math
+import torch.utils.model_zoo as model_zoo
+
+import torchvision
+from torchvision.models.resnet import BasicBlock, Bottleneck
+
+class ResNet(torchvision.models.resnet.ResNet):
+ """torchvision ResNet adapted for Faster R-CNN: strides moved to match the
+ caffe variant, layer4 kept at stride 1, and the classifier head removed."""
+ def __init__(self, block, layers, num_classes=1000):
+ self.inplanes = 64
+ super(ResNet, self).__init__(block, layers, num_classes)
+ # change to match the caffe resnet
+ for i in range(2, 4):
+ getattr(self, 'layer%d'%i)[0].conv1.stride = (2,2)
+ getattr(self, 'layer%d'%i)[0].conv2.stride = (1,1)
+ # use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
+ self.layer4[0].conv2.stride = (1,1)
+ self.layer4[0].downsample[0].stride = (1,1)
+
+ # The detector supplies its own pooling/heads; drop the ImageNet classifier.
+ del self.avgpool, self.fc
+
+
+def resnet18(pretrained=False):
+ """Constructs a ResNet-18 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = ResNet(BasicBlock, [2, 2, 2, 2])
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
+ return model
+
+
+def resnet34(pretrained=False):
+ """Constructs a ResNet-34 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = ResNet(BasicBlock, [3, 4, 6, 3])
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
+ return model
+
+
+def resnet50(pretrained=False):
+ """Constructs a ResNet-50 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = ResNet(Bottleneck, [3, 4, 6, 3])
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
+ return model
+
+
+def resnet101(pretrained=False):
+ """Constructs a ResNet-101 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = ResNet(Bottleneck, [3, 4, 23, 3])
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
+ return model
+
+
+def resnet152(pretrained=False):
+ """Constructs a ResNet-152 model.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = ResNet(Bottleneck, [3, 8, 36, 3])
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
+ return model
+
+class resnetv1(Network):
+ """ResNet backbone for the detection Network: conv1..layer3 form the head,
+ layer4 + global average pooling form the tail."""
+ def __init__(self, num_layers=50):
+ Network.__init__(self)
+ self._feat_stride = [16, ]
+ self._feat_compress = [1. / float(self._feat_stride[0]), ]
+ self._num_layers = num_layers
+ self._net_conv_channels = 1024
+ self._fc7_channels = 2048
+
+ def _crop_pool_layer(self, bottom, rois):
+ # Delegate to the base implementation; RESNET.MAX_POOL selects crop+maxpool.
+ return Network._crop_pool_layer(self, bottom, rois, cfg.RESNET.MAX_POOL)
+
+ def _image_to_head(self):
+ # NOTE(review): self._image is a list of two tensors in TRAIN mode (see
+ # Network.forward); this passes it through directly — confirm resnetv1 is
+ # only used where self._image is a single tensor.
+ net_conv = self._layers['head'](self._image)
+ self._act_summaries['conv'] = net_conv
+
+ return net_conv
+
+ def _head_to_tail(self, pool5):
+ fc7 = self.resnet.layer4(pool5).mean(3).mean(2) # average pooling after layer4
+ return fc7
+
+ def _init_modules(self):
+ # choose different blocks for different number of layers
+ if self._num_layers == 50:
+ self.resnet = resnet50()
+
+ elif self._num_layers == 101:
+ self.resnet = resnet101()
+
+ elif self._num_layers == 152:
+ self.resnet = resnet152()
+
+ else:
+ # other numbers are not supported
+ raise NotImplementedError
+
+ # Fix blocks: conv1/bn1 are always frozen; FIXED_BLOCKS freezes layer1..3 cumulatively.
+ for p in self.resnet.bn1.parameters(): p.requires_grad=False
+ for p in self.resnet.conv1.parameters(): p.requires_grad=False
+ assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
+ if cfg.RESNET.FIXED_BLOCKS >= 3:
+ for p in self.resnet.layer3.parameters(): p.requires_grad=False
+ if cfg.RESNET.FIXED_BLOCKS >= 2:
+ for p in self.resnet.layer2.parameters(): p.requires_grad=False
+ if cfg.RESNET.FIXED_BLOCKS >= 1:
+ for p in self.resnet.layer1.parameters(): p.requires_grad=False
+
+ def set_bn_fix(m):
+ classname = m.__class__.__name__
+ if classname.find('BatchNorm') != -1:
+ for p in m.parameters(): p.requires_grad=False
+
+ # Batch-norm statistics and affine params are frozen everywhere.
+ self.resnet.apply(set_bn_fix)
+
+ # Build resnet.
+ self._layers['head'] = nn.Sequential(self.resnet.conv1, self.resnet.bn1,self.resnet.relu,
+ self.resnet.maxpool,self.resnet.layer1,self.resnet.layer2,self.resnet.layer3)
+
+ def train(self, mode=True):
+ # Override train so that the training mode is set as we want
+ nn.Module.train(self, mode)
+ if mode:
+ # Set fixed blocks to be in eval mode (not really doing anything)
+ self.resnet.eval()
+ if cfg.RESNET.FIXED_BLOCKS <= 3:
+ self.resnet.layer4.train()
+ if cfg.RESNET.FIXED_BLOCKS <= 2:
+ self.resnet.layer3.train()
+ if cfg.RESNET.FIXED_BLOCKS <= 1:
+ self.resnet.layer2.train()
+ if cfg.RESNET.FIXED_BLOCKS == 0:
+ self.resnet.layer1.train()
+
+ # Set batchnorm always in eval mode during training
+ def set_bn_eval(m):
+ classname = m.__class__.__name__
+ if classname.find('BatchNorm') != -1:
+ m.eval()
+
+ self.resnet.apply(set_bn_eval)
+
+ def load_pretrained_cnn(self, state_dict):
+ # Load only the keys this trimmed resnet still declares (fc was deleted).
+ self.resnet.load_state_dict({k: state_dict[k] for k in list(self.resnet.state_dict())})
+
+
+
+
+
+
+
+
+
diff --git a/lib/nets/vgg16.py b/lib/nets/vgg16.py
new file mode 100644
index 0000000..55249d4
--- /dev/null
+++ b/lib/nets/vgg16.py
@@ -0,0 +1,67 @@
+# --------------------------------------------------------
+# Tensorflow Faster R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Xinlei Chen
+# --------------------------------------------------------
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from nets.network import Network
+from model.config import cfg
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Variable
+import math
+import torchvision.models as models
+
+class MELM_vgg16(Network):
+ """VGG-16 backbone for the detection Network: conv1..conv5_3 (no final
+ maxpool) as the head, the fc6/fc7 classifier (fc8 removed) as the tail."""
+ def __init__(self):
+ Network.__init__(self)
+ self._feat_stride = [16, ]
+ self._feat_compress = [1. / float(self._feat_stride[0]), ]
+ self._net_conv_channels = 512
+ self._fc7_channels = 4096
+
+ def _init_head_tail(self):
+ self.vgg = models.vgg16()
+ # Remove fc8
+ self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier._modules.values())[:-1])
+
+ # Fix the layers before conv3:
+ for layer in range(10):
+ for p in self.vgg.features[layer].parameters(): p.requires_grad = False
+
+ # not using the last maxpool layer
+ self._layers['head'] = nn.Sequential(*list(self.vgg.features._modules.values())[:-1])
+
+ # ------- parallel Gpu-----------
+ self._layers['head'] = torch.nn.DataParallel(self._layers['head'])
+
+ def _image_to_headtest(self):
+ # Single-tensor variant of _image_to_head (no train/eval branching).
+ net_conv = self._layers['head'](self._image)
+ return net_conv
+
+ def _image_to_head(self):
+ # Training keeps self._image as a list of two tensors (one per image);
+ # testing uses a single tensor.
+ if self.training:
+ net_conv = []
+ for i in range(2):
+ net_conv.append(self._layers['head'](self._image[i]))
+ else:
+ net_conv = self._layers['head'](self._image)
+ return net_conv
+
+ def _head_to_tail(self, pool5):
+ # Flatten pooled features and run fc6/fc7; fc7 is cached for loss terms.
+ pool5_flat = pool5.view(pool5.size(0), -1)
+ fc7 = self.vgg.classifier(pool5_flat)
+ self._predictions['fc7'] = fc7
+
+ return fc7
+
+ def load_pretrained_cnn(self, state_dict):
+ # Load only the overlapping keys (fc8 was removed from the classifier).
+ self.vgg.load_state_dict({k:v for k,v in state_dict.items() if k in self.vgg.state_dict()})
+ #self.vgg.classifier = torch.nn.DataParallel(self.vgg.classifier)
+
+
diff --git a/lib/ops/__init__.py b/lib/ops/__init__.py
new file mode 100644
index 0000000..f319180
--- /dev/null
+++ b/lib/ops/__init__.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Sep 29 15:19:42 2019
+
+@author: vasgaoweithu
+"""
+
+from .nms import nms, soft_nms
+from .roi_align import RoIAlign
+from .roi_pool import RoIPool
+from .roi_crop import RoICrop
+from .roi_ring_pool import RoIRingPool
+
+__all__ = ['nms', 'soft_nms', 'RoIAlign', 'RoIPool', 'RoICrop', 'RoIRingPool']
\ No newline at end of file
diff --git a/lib/ops/__pycache__/__init__.cpython-37.pyc b/lib/ops/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..50b9d83
Binary files /dev/null and b/lib/ops/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/ops/nms/__init__.py b/lib/ops/nms/__init__.py
new file mode 100644
index 0000000..c440704
--- /dev/null
+++ b/lib/ops/nms/__init__.py
@@ -0,0 +1,3 @@
+from .nms_wrapper import nms, soft_nms
+
+__all__ = ['nms', 'soft_nms']
diff --git a/lib/ops/nms/__pycache__/__init__.cpython-37.pyc b/lib/ops/nms/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..ba7698b
Binary files /dev/null and b/lib/ops/nms/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/ops/nms/__pycache__/nms_wrapper.cpython-37.pyc b/lib/ops/nms/__pycache__/nms_wrapper.cpython-37.pyc
new file mode 100644
index 0000000..f066d87
Binary files /dev/null and b/lib/ops/nms/__pycache__/nms_wrapper.cpython-37.pyc differ
diff --git a/lib/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so b/lib/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..4862476
Binary files /dev/null and b/lib/ops/nms/nms_cpu.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so b/lib/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..4dacae6
Binary files /dev/null and b/lib/ops/nms/nms_cuda.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/ops/nms/nms_wrapper.py b/lib/ops/nms/nms_wrapper.py
new file mode 100644
index 0000000..03215f1
--- /dev/null
+++ b/lib/ops/nms/nms_wrapper.py
@@ -0,0 +1,78 @@
+import numpy as np
+import torch
+
+from . import nms_cpu, nms_cuda
+from .soft_nms_cpu import soft_nms_cpu
+
+
+def nms(dets, iou_thr, device_id=None):
+ """Dispatch to either CPU or GPU NMS implementations.
+
+ The input can be either a torch tensor or numpy array. GPU NMS will be used
+ if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
+ will be used. The returned type will always be the same as inputs.
+
+ Arguments:
+ dets (torch.Tensor or np.ndarray): bboxes with scores.
+ iou_thr (float): IoU threshold for NMS.
+ device_id (int, optional): when `dets` is a numpy array, if `device_id`
+ is None, then cpu nms is used, otherwise gpu_nms will be used.
+
+ Returns:
+ tuple: kept bboxes and indice, which is always the same data type as
+ the input.
+ """
+ # convert dets (tensor or numpy array) to tensor
+ if isinstance(dets, torch.Tensor):
+ is_numpy = False
+ dets_th = dets
+ elif isinstance(dets, np.ndarray):
+ is_numpy = True
+ device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
+ dets_th = torch.from_numpy(dets).to(device)
+ else:
+ raise TypeError(
+ 'dets must be either a Tensor or numpy array, but got {}'.format(
+ type(dets)))
+
+ # execute cpu or cuda nms
+ if dets_th.shape[0] == 0:
+ # Nothing to suppress: empty index result.
+ inds = dets_th.new_zeros(0, dtype=torch.long)
+ else:
+ if dets_th.is_cuda:
+ inds = nms_cuda.nms(dets_th, iou_thr)
+ else:
+ inds = nms_cpu.nms(dets_th, iou_thr)
+
+ # Return the same container type the caller passed in.
+ if is_numpy:
+ inds = inds.cpu().numpy()
+ return dets[inds, :], inds
+
+
+def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
+ """Soft-NMS on CPU (Cython kernel). Accepts a tensor or numpy array and
+ returns rescored detections plus kept indices in the same container type.
+ method: 'linear' or 'gaussian' decay; sigma: gaussian width; min_score:
+ boxes decayed below this are dropped.
+ """
+ if isinstance(dets, torch.Tensor):
+ is_tensor = True
+ dets_np = dets.detach().cpu().numpy()
+ elif isinstance(dets, np.ndarray):
+ is_tensor = False
+ dets_np = dets
+ else:
+ raise TypeError(
+ 'dets must be either a Tensor or numpy array, but got {}'.format(
+ type(dets)))
+
+ method_codes = {'linear': 1, 'gaussian': 2}
+ if method not in method_codes:
+ raise ValueError('Invalid method for SoftNMS: {}'.format(method))
+ new_dets, inds = soft_nms_cpu(
+ dets_np,
+ iou_thr,
+ method=method_codes[method],
+ sigma=sigma,
+ min_score=min_score)
+
+ if is_tensor:
+ return dets.new_tensor(new_dets), dets.new_tensor(
+ inds, dtype=torch.long)
+ else:
+ return new_dets.astype(np.float32), inds.astype(np.int64)
diff --git a/lib/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so b/lib/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000..04e9bb9
Binary files /dev/null and b/lib/ops/nms/soft_nms_cpu.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/ops/nms/src/nms_cpu.cpp b/lib/ops/nms/src/nms_cpu.cpp
new file mode 100644
index 0000000..f7cffb4
--- /dev/null
+++ b/lib/ops/nms/src/nms_cpu.cpp
@@ -0,0 +1,71 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+#include <torch/extension.h>
+
+// CPU NMS: greedily keep the highest-scoring box and suppress every remaining
+// box whose IoU with a kept box is >= threshold.
+// dets: (N, 5) float tensor [x1, y1, x2, y2, score]; returns kept row indices
+// (Long tensor). Note: template arguments and includes were stripped from this
+// file by a text-extraction step and are restored here.
+template <typename scalar_t>
+at::Tensor nms_cpu_kernel(const at::Tensor& dets, const float threshold) {
+  AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
+
+  if (dets.numel() == 0) {
+    return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
+  }
+
+  auto x1_t = dets.select(1, 0).contiguous();
+  auto y1_t = dets.select(1, 1).contiguous();
+  auto x2_t = dets.select(1, 2).contiguous();
+  auto y2_t = dets.select(1, 3).contiguous();
+  auto scores = dets.select(1, 4).contiguous();
+
+  // Areas use inclusive pixel coordinates, hence the +1.
+  at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1);
+
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+
+  auto ndets = dets.size(0);
+  at::Tensor suppressed_t =
+      at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU));
+
+  auto suppressed = suppressed_t.data<uint8_t>();
+  auto order = order_t.data<int64_t>();
+  auto x1 = x1_t.data<scalar_t>();
+  auto y1 = y1_t.data<scalar_t>();
+  auto x2 = x2_t.data<scalar_t>();
+  auto y2 = y2_t.data<scalar_t>();
+  auto areas = areas_t.data<scalar_t>();
+
+  for (int64_t _i = 0; _i < ndets; _i++) {
+    auto i = order[_i];
+    if (suppressed[i] == 1) continue;
+    auto ix1 = x1[i];
+    auto iy1 = y1[i];
+    auto ix2 = x2[i];
+    auto iy2 = y2[i];
+    auto iarea = areas[i];
+
+    for (int64_t _j = _i + 1; _j < ndets; _j++) {
+      auto j = order[_j];
+      if (suppressed[j] == 1) continue;
+      auto xx1 = std::max(ix1, x1[j]);
+      auto yy1 = std::max(iy1, y1[j]);
+      auto xx2 = std::min(ix2, x2[j]);
+      auto yy2 = std::min(iy2, y2[j]);
+
+      auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1);
+      auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1);
+      auto inter = w * h;
+      auto ovr = inter / (iarea + areas[j] - inter);
+      if (ovr >= threshold) suppressed[j] = 1;
+    }
+  }
+  // Indices of rows that were never suppressed.
+  return at::nonzero(suppressed_t == 0).squeeze(1);
+}
+
+at::Tensor nms(const at::Tensor& dets, const float threshold) {
+  at::Tensor result;
+  AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] {
+    result = nms_cpu_kernel<scalar_t>(dets, threshold);
+  });
+  return result;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("nms", &nms, "non-maximum suppression");
+}
\ No newline at end of file
diff --git a/lib/ops/nms/src/nms_cuda.cpp b/lib/ops/nms/src/nms_cuda.cpp
new file mode 100644
index 0000000..0ea6f9b
--- /dev/null
+++ b/lib/ops/nms/src/nms_cuda.cpp
@@ -0,0 +1,17 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+#include <torch/extension.h>
+
+#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
+
+// Implemented in nms_kernel.cu: GPU NMS over N x 5 [x1, y1, x2, y2, score] boxes.
+at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh);
+
+// Python-facing CUDA entry point: verify the tensor is on the GPU,
+// short-circuit on an empty detection set, then delegate to the kernel wrapper.
+at::Tensor nms(const at::Tensor& dets, const float threshold) {
+  CHECK_CUDA(dets);
+  if (dets.numel() == 0)
+    // Empty result stays on CPU as kLong, matching the CPU build's contract.
+    return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
+  return nms_cuda(dets, threshold);
+}
+
+// Expose the CUDA NMS implementation to Python under the same name `nms`,
+// so callers pick the backend simply by importing this extension.
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("nms", &nms, "non-maximum suppression");
+}
\ No newline at end of file
diff --git a/lib/ops/nms/src/nms_kernel.cu b/lib/ops/nms/src/nms_kernel.cu
new file mode 100644
index 0000000..9254f2a
--- /dev/null
+++ b/lib/ops/nms/src/nms_kernel.cu
@@ -0,0 +1,131 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+
+#include <THC/THC.h>
+#include <THC/THCDeviceUtils.cuh>
+
+#include <vector>
+#include <iostream>
+
+// One thread per box inside a block; each 64-bit mask word holds one
+// suppression bit per thread (sizeof(unsigned long long) * 8 == 64).
+int const threadsPerBlock = sizeof(unsigned long long) * 8;
+
+// Intersection-over-union of two boxes laid out as [x1, y1, x2, y2, ...],
+// using the inclusive-pixel (+1) width/height convention.
+__device__ inline float devIoU(float const * const a, float const * const b) {
+  float ix1 = max(a[0], b[0]);
+  float iy1 = max(a[1], b[1]);
+  float ix2 = min(a[2], b[2]);
+  float iy2 = min(a[3], b[3]);
+  float iw = max(ix2 - ix1 + 1, 0.f);
+  float ih = max(iy2 - iy1 + 1, 0.f);
+  float inter = iw * ih;
+  float area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
+  float area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
+  return inter / (area_a + area_b - inter);
+}
+
+// Tiled pairwise-overlap kernel. Each thread block compares a tile of up to
+// threadsPerBlock "row" boxes (blockIdx.y) against a tile of "col" boxes
+// (blockIdx.x); each thread owns one row box and writes a 64-bit word whose
+// bit i is set when col-tile box i overlaps it above the threshold.
+__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
+                           const float *dev_boxes, unsigned long long *dev_mask) {
+  const int row_start = blockIdx.y;
+  const int col_start = blockIdx.x;
+
+  // if (row_start > col_start) return;
+
+  // Tiles at the ragged edge hold fewer than threadsPerBlock boxes.
+  const int row_size =
+        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
+  const int col_size =
+        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
+
+  // Stage the column tile (5 floats per box) in shared memory.
+  __shared__ float block_boxes[threadsPerBlock * 5];
+  if (threadIdx.x < col_size) {
+    block_boxes[threadIdx.x * 5 + 0] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
+    block_boxes[threadIdx.x * 5 + 1] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
+    block_boxes[threadIdx.x * 5 + 2] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
+    block_boxes[threadIdx.x * 5 + 3] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
+    block_boxes[threadIdx.x * 5 + 4] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
+  }
+  __syncthreads();
+
+  if (threadIdx.x < row_size) {
+    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
+    const float *cur_box = dev_boxes + cur_box_idx * 5;
+    int i = 0;
+    unsigned long long t = 0;
+    int start = 0;
+    if (row_start == col_start) {
+      // Diagonal tile: compare only against later boxes (skip self).
+      start = threadIdx.x + 1;
+    }
+    for (i = start; i < col_size; i++) {
+      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
+        t |= 1ULL << i;
+      }
+    }
+    // One mask word per (row box, column tile) pair.
+    const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
+    dev_mask[cur_box_idx * col_blocks + col_start] = t;
+  }
+}
+
+// boxes is a N x 5 tensor of [x1, y1, x2, y2, score].
+// Sorts boxes by score, launches the tiled overlap kernel, then performs the
+// sequential greedy suppression sweep over the bitmask on the host.
+// Returns the kept indices into the ORIGINAL (unsorted) box order.
+// Stripped template arguments and the <<<...>>> launch syntax restored below;
+// they were lost in extraction.
+at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
+  using scalar_t = float;
+  AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
+  auto scores = boxes.select(1, 4);
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+  auto boxes_sorted = boxes.index_select(0, order_t);
+
+  int boxes_num = boxes.size(0);
+
+  const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
+
+  scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
+
+  THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
+
+  unsigned long long* mask_dev = NULL;
+  //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
+  //                         boxes_num * col_blocks * sizeof(unsigned long long)));
+
+  mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
+
+  dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
+              THCCeilDiv(boxes_num, threadsPerBlock));
+  dim3 threads(threadsPerBlock);
+  nms_kernel<<<blocks, threads>>>(boxes_num,
+                                  nms_overlap_thresh,
+                                  boxes_dev,
+                                  mask_dev);
+
+  // Copy the full suppression bitmask back to the host for the greedy sweep.
+  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
+  THCudaCheck(cudaMemcpy(&mask_host[0],
+                         mask_dev,
+                         sizeof(unsigned long long) * boxes_num * col_blocks,
+                         cudaMemcpyDeviceToHost));
+
+  std::vector<unsigned long long> remv(col_blocks);
+  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
+
+  at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
+  int64_t* keep_out = keep.data<int64_t>();
+
+  // Greedy sweep in score order: keep a box iff no higher-scoring kept box
+  // has already set its suppression bit, then OR its mask into the running set.
+  int num_to_keep = 0;
+  for (int i = 0; i < boxes_num; i++) {
+    int nblock = i / threadsPerBlock;
+    int inblock = i % threadsPerBlock;
+
+    if (!(remv[nblock] & (1ULL << inblock))) {
+      keep_out[num_to_keep++] = i;
+      unsigned long long *p = &mask_host[0] + i * col_blocks;
+      for (int j = nblock; j < col_blocks; j++) {
+        remv[j] |= p[j];
+      }
+    }
+  }
+
+  THCudaFree(state, mask_dev);
+  // TODO improve this part
+  // Map sorted positions back to original indices, sorted ascending.
+  return std::get<0>(order_t.index({
+      keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
+          order_t.device(), keep.scalar_type())
+  }).sort(0, false));
+}
\ No newline at end of file
diff --git a/lib/ops/nms/src/soft_nms_cpu.cpp b/lib/ops/nms/src/soft_nms_cpu.cpp
new file mode 100644
index 0000000..9615693
--- /dev/null
+++ b/lib/ops/nms/src/soft_nms_cpu.cpp
@@ -0,0 +1,10323 @@
+/* Generated by Cython 0.29.12 */
+
+/* BEGIN: Cython Metadata
+{
+ "distutils": {
+ "depends": [
+ "/home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h",
+ "/home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h"
+ ],
+ "extra_compile_args": {
+ "cxx": [
+ "-Wno-unused-function",
+ "-Wno-write-strings"
+ ]
+ },
+ "include_dirs": [
+ "/home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/numpy/core/include"
+ ],
+ "language": "c++",
+ "name": "mmdet.ops.nms.soft_nms_cpu",
+ "sources": [
+ "mmdet/ops/nms/src/soft_nms_cpu.pyx"
+ ]
+ },
+ "module_name": "mmdet.ops.nms.soft_nms_cpu"
+}
+END: Cython Metadata */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_12"
+#define CYTHON_HEX_VERSION 0x001D0CF0
+#define CYTHON_FUTURE_DIVISION 1
+#include
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+// Swallow "unused variable" warnings: a templated no-op in C++, a cast-to-void
+// macro in C. (template<class T> restored; it was lost in extraction.)
+# if defined(__cplusplus)
+    template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+#   define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef __cplusplus
+ #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#endif
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #else
+ #define CYTHON_INLINE inline
+ #endif
+#endif
+// Explicitly invoke T's destructor (used by Cython for placement-allocated
+// C++ objects). template<typename T> restored; it was lost in extraction.
+template<typename T>
+void __Pyx_call_destructor(T& x) {
+    x.~T();
+}
+// Pointer-backed stand-in for a C++ reference, letting Cython default-construct
+// and rebind "references". Template parameter lists and the const_cast<T*>
+// target restored; they were lost in extraction.
+template<typename T>
+class __Pyx_FakeReference {
+  public:
+    __Pyx_FakeReference() : ptr(NULL) { }
+    __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
+    T *operator->() { return ptr; }
+    T *operator&() { return ptr; }
+    operator T&() { return *ptr; }
+    template<typename U> bool operator ==(U other) { return *ptr == other; }
+    template<typename U> bool operator !=(U other) { return *ptr != other; }
+  private:
+    T *ptr;
+};
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+// Back-fill the PEP 539 thread-specific-storage API (PyThread_tss_*) on
+// Pythons older than 3.7, implemented on top of the legacy PyThread_*_key calls.
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+  *key = PyThread_create_key();
+  return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+  Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+  *key = Py_tss_NEEDS_INIT;
+  return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+  PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+  return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+  PyThread_delete_key(*key);
+  *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+  return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+  return PyThread_get_key_value(*key);
+}
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+ #define PyObject_Unicode PyObject_Str
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+  #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+// Fallback quiet-NaN for toolchains without a NAN macro: an all-ones bit
+// pattern is a NaN in IEEE-754 single precision.
+static CYTHON_INLINE float __PYX_NAN() {
+  float value;
+  memset(&value, 0xFF, sizeof(value));
+  return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+{ \
+ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
+}
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__mmdet__ops__nms__soft_nms_cpu
+#define __PYX_HAVE_API__mmdet__ops__nms__soft_nms_cpu
+/* Early includes */
+#include
+#include
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#ifdef _OPENMP
+#include
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+// True iff 0 <= i < limit, using a single unsigned comparison (a negative i
+// wraps to a huge size_t and fails the bound check).
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+    return (size_t) i < (size_t) limit;
+}
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+// strlen for NUL-terminated Py_UNICODE buffers (count excludes the terminator).
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+    const Py_UNICODE *u_end = u;
+    while (*u_end++) ;
+    return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+/* Nonzero when sys.getdefaultencoding() is something other than "ascii". */
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+/* Queries sys.getdefaultencoding() once (called at module init).  When the
+   default encoding is not "ascii", verifies it is at least a superset of
+   ASCII by round-tripping all 128 ASCII code points through it, raising
+   ValueError if the round trip does not reproduce the bytes.
+   Returns 0 on success, -1 with a Python exception set on failure. */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ /* Build the 128 ASCII bytes 0..127 to test the encoding with. */
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ /* Re-encode via the default encoding; a superset of ASCII must
+    reproduce the original 128 bytes exactly. */
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ /* Error path: drop whichever references were acquired before failing. */
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+/* Name of sys.getdefaultencoding(), cached as a malloc'ed C string for the
+   __Pyx_PyUnicode_FromStringAndSize macro above.  Never freed in this
+   translation unit's visible code (lives for the process lifetime). */
+static char* __PYX_DEFAULT_STRING_ENCODING;
+/* Copies sys.getdefaultencoding() into __PYX_DEFAULT_STRING_ENCODING
+   (called at module init).  Returns 0 on success, -1 on failure; on a
+   malloc failure no Python exception is set by this function itself. */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ /* Duplicate the name: the buffer returned by PyBytes_AsString only
+    lives as long as default_encoding, which is released below. */
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+/* Header.proto */
+#if !defined(CYTHON_CCOMPLEX)
+ #if defined(__cplusplus)
+ #define CYTHON_CCOMPLEX 1
+ #elif defined(_Complex_I)
+ #define CYTHON_CCOMPLEX 1
+ #else
+ #define CYTHON_CCOMPLEX 0
+ #endif
+#endif
+#if CYTHON_CCOMPLEX
+  /* Pull in the complex-number support the typedefs below rely on:
+     ::std::complex<T> in C++ mode, the C99 _Complex types otherwise.
+     The header names had been stripped from these two #include lines,
+     leaving bare directives that fail to preprocess; restored here. */
+  #ifdef __cplusplus
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+ #undef _Complex_I
+ #define _Complex_I 1.0fj
+#endif
+
+
+static const char *__pyx_f[] = {
+ "mmdet/ops/nms/src/soft_nms_cpu.pyx",
+ "__init__.pxd",
+ "type.pxd",
+};
+/* BufferFormatStructs.proto */
+#define IS_UNSIGNED(type) (((type) -1) > 0)
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+ const char* name;
+ struct __Pyx_StructField_* fields;
+ size_t size;
+ size_t arraysize[8];
+ int ndim;
+ char typegroup;
+ char is_unsigned;
+ int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+ __Pyx_TypeInfo* type;
+ const char* name;
+ size_t offset;
+} __Pyx_StructField;
+typedef struct {
+ __Pyx_StructField* field;
+ size_t parent_offset;
+} __Pyx_BufFmt_StackElem;
+typedef struct {
+ __Pyx_StructField root;
+ __Pyx_BufFmt_StackElem* head;
+ size_t fmt_offset;
+ size_t new_count, enc_count;
+ size_t struct_alignment;
+ int is_complex;
+ char enc_type;
+ char new_packmode;
+ char enc_packmode;
+ char is_valid_array;
+} __Pyx_BufFmt_Context;
+
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
+ * # in Cython to enable them only on the right systems.
+ *
+ * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
+ * ctypedef npy_int16 int16_t
+ * ctypedef npy_int32 int32_t
+ */
+typedef npy_int8 __pyx_t_5numpy_int8_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ *
+ * ctypedef npy_int8 int8_t
+ * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
+ * ctypedef npy_int32 int32_t
+ * ctypedef npy_int64 int64_t
+ */
+typedef npy_int16 __pyx_t_5numpy_int16_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+ * ctypedef npy_int8 int8_t
+ * ctypedef npy_int16 int16_t
+ * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
+ * ctypedef npy_int64 int64_t
+ * #ctypedef npy_int96 int96_t
+ */
+typedef npy_int32 __pyx_t_5numpy_int32_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
+ * ctypedef npy_int16 int16_t
+ * ctypedef npy_int32 int32_t
+ * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
+ * #ctypedef npy_int96 int96_t
+ * #ctypedef npy_int128 int128_t
+ */
+typedef npy_int64 __pyx_t_5numpy_int64_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ * #ctypedef npy_int128 int128_t
+ *
+ * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uint16 uint16_t
+ * ctypedef npy_uint32 uint32_t
+ */
+typedef npy_uint8 __pyx_t_5numpy_uint8_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
+ *
+ * ctypedef npy_uint8 uint8_t
+ * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uint32 uint32_t
+ * ctypedef npy_uint64 uint64_t
+ */
+typedef npy_uint16 __pyx_t_5numpy_uint16_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
+ * ctypedef npy_uint8 uint8_t
+ * ctypedef npy_uint16 uint16_t
+ * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uint64 uint64_t
+ * #ctypedef npy_uint96 uint96_t
+ */
+typedef npy_uint32 __pyx_t_5numpy_uint32_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
+ * ctypedef npy_uint16 uint16_t
+ * ctypedef npy_uint32 uint32_t
+ * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
+ * #ctypedef npy_uint96 uint96_t
+ * #ctypedef npy_uint128 uint128_t
+ */
+typedef npy_uint64 __pyx_t_5numpy_uint64_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ * #ctypedef npy_uint128 uint128_t
+ *
+ * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
+ * ctypedef npy_float64 float64_t
+ * #ctypedef npy_float80 float80_t
+ */
+typedef npy_float32 __pyx_t_5numpy_float32_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ *
+ * ctypedef npy_float32 float32_t
+ * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
+ * #ctypedef npy_float80 float80_t
+ * #ctypedef npy_float128 float128_t
+ */
+typedef npy_float64 __pyx_t_5numpy_float64_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":800
+ * # The int types are mapped a bit surprising --
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long int_t # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong long_t
+ * ctypedef npy_longlong longlong_t
+ */
+typedef npy_long __pyx_t_5numpy_int_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long int_t
+ * ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong longlong_t
+ *
+ */
+typedef npy_longlong __pyx_t_5numpy_long_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+ * ctypedef npy_long int_t
+ * ctypedef npy_longlong long_t
+ * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_ulong uint_t
+ */
+typedef npy_longlong __pyx_t_5numpy_longlong_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
+ * ctypedef npy_longlong longlong_t
+ *
+ * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong ulong_t
+ * ctypedef npy_ulonglong ulonglong_t
+ */
+typedef npy_ulong __pyx_t_5numpy_uint_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
+ *
+ * ctypedef npy_ulong uint_t
+ * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong ulonglong_t
+ *
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
+ * ctypedef npy_ulong uint_t
+ * ctypedef npy_ulonglong ulong_t
+ * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_intp intp_t
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":808
+ * ctypedef npy_ulonglong ulonglong_t
+ *
+ * ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uintp uintp_t
+ *
+ */
+typedef npy_intp __pyx_t_5numpy_intp_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":809
+ *
+ * ctypedef npy_intp intp_t
+ * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_double float_t
+ */
+typedef npy_uintp __pyx_t_5numpy_uintp_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":811
+ * ctypedef npy_uintp uintp_t
+ *
+ * ctypedef npy_double float_t # <<<<<<<<<<<<<<
+ * ctypedef npy_double double_t
+ * ctypedef npy_longdouble longdouble_t
+ */
+typedef npy_double __pyx_t_5numpy_float_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":812
+ *
+ * ctypedef npy_double float_t
+ * ctypedef npy_double double_t # <<<<<<<<<<<<<<
+ * ctypedef npy_longdouble longdouble_t
+ *
+ */
+typedef npy_double __pyx_t_5numpy_double_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
+ * ctypedef npy_double float_t
+ * ctypedef npy_double double_t
+ * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_cfloat cfloat_t
+ */
+typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
+/* Declarations.proto */
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ typedef ::std::complex< float > __pyx_t_float_complex;
+ #else
+ typedef float _Complex __pyx_t_float_complex;
+ #endif
+#else
+ typedef struct { float real, imag; } __pyx_t_float_complex;
+#endif
+static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
+
+/* Declarations.proto */
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ typedef ::std::complex< double > __pyx_t_double_complex;
+ #else
+ typedef double _Complex __pyx_t_double_complex;
+ #endif
+#else
+ typedef struct { double real, imag; } __pyx_t_double_complex;
+#endif
+static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
+
+
+/*--- Type declarations ---*/
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
+ * ctypedef npy_longdouble longdouble_t
+ *
+ * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
+ * ctypedef npy_cdouble cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t
+ */
+typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+ *
+ * ctypedef npy_cfloat cfloat_t
+ * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
+ * ctypedef npy_clongdouble clongdouble_t
+ *
+ */
+typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
+ * ctypedef npy_cfloat cfloat_t
+ * ctypedef npy_cdouble cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_cdouble complex_t
+ */
+typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
+ * ctypedef npy_clongdouble clongdouble_t
+ *
+ * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew1(a):
+ */
+typedef npy_cdouble __pyx_t_5numpy_complex_t;
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* ArgTypeTest.proto */
+#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
+ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
+ __Pyx__ArgTypeTest(obj, type, name, exact))
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
+
+/* IsLittleEndian.proto */
+static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
+
+/* BufferFormatCheck.proto */
+static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
+static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
+ __Pyx_BufFmt_StackElem* stack,
+ __Pyx_TypeInfo* type);
+
+/* BufferGetAndValidate.proto */
+#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
+ ((obj == Py_None || obj == NULL) ?\
+ (__Pyx_ZeroBuffer(buf), 0) :\
+ __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
+static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
+ __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
+static void __Pyx_ZeroBuffer(Py_buffer* buf);
+static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
+static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallNoArg.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* GetItemInt.proto */
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
+ __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck);
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* ObjectGetItem.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
+#else
+#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
+#endif
+
+/* PyIntBinop.proto */
+#if !CYTHON_COMPILING_IN_PYPY
+static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
+#else
+#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
+ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
+#endif
+
+/* SetItemInt.proto */
+#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
+ (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
+ __Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
+static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
+static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
+ int is_list, int wraparound, int boundscheck);
+
+/* SliceObject.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+ PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+ int has_cstart, int has_cstop, int wraparound);
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* RaiseException.proto */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+/* DictGetItem.proto */
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
+#define __Pyx_PyObject_Dict_GetItem(obj, name)\
+ (likely(PyDict_CheckExact(obj)) ?\
+ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
+#else
+#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
+#endif
+
+/* RaiseTooManyValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+/* RaiseNeedMoreValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+/* RaiseNoneIterError.proto */
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+
+/* ExtTypeTest.proto */
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
+
+/* GetTopmostException.proto */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
+#endif
+
+/* SaveResetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+#else
+#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
+#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
+#endif
+
+/* PyErrExceptionMatches.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
+#else
+#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
+#endif
+
+/* GetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* TypeImport.proto */
+#ifndef __PYX_HAVE_RT_ImportType_proto
+#define __PYX_HAVE_RT_ImportType_proto
+enum __Pyx_ImportType_CheckSize {
+ __Pyx_ImportType_CheckSize_Error = 0,
+ __Pyx_ImportType_CheckSize_Warn = 1,
+ __Pyx_ImportType_CheckSize_Ignore = 2
+};
+static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
+#endif
+
+/* Import.proto */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* BufferStructDeclare.proto */
+typedef struct {
+ Py_ssize_t shape, strides, suboffsets;
+} __Pyx_Buf_DimInfo;
+typedef struct {
+ size_t refcount;
+ Py_buffer pybuffer;
+} __Pyx_Buffer;
+typedef struct {
+ __Pyx_Buffer *rcbuffer;
+ char *data;
+ __Pyx_Buf_DimInfo diminfo[8];
+} __Pyx_LocalBuf_ND;
+
+#if PY_MAJOR_VERSION < 3
+ static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
+ static void __Pyx_ReleaseBuffer(Py_buffer *view);
+#else
+ #define __Pyx_GetBuffer PyObject_GetBuffer
+ #define __Pyx_ReleaseBuffer PyBuffer_Release
+#endif
+
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+/* RealImag.proto */
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ #define __Pyx_CREAL(z) ((z).real())
+ #define __Pyx_CIMAG(z) ((z).imag())
+ #else
+ #define __Pyx_CREAL(z) (__real__(z))
+ #define __Pyx_CIMAG(z) (__imag__(z))
+ #endif
+#else
+ #define __Pyx_CREAL(z) ((z).real)
+ #define __Pyx_CIMAG(z) ((z).imag)
+#endif
+#if defined(__cplusplus) && CYTHON_CCOMPLEX\
+ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
+ #define __Pyx_SET_CREAL(z,x) ((z).real(x))
+ #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
+#else
+ #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
+ #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
+#endif
+
+/* Arithmetic.proto */
+#if CYTHON_CCOMPLEX
+ #define __Pyx_c_eq_float(a, b) ((a)==(b))
+ #define __Pyx_c_sum_float(a, b) ((a)+(b))
+ #define __Pyx_c_diff_float(a, b) ((a)-(b))
+ #define __Pyx_c_prod_float(a, b) ((a)*(b))
+ #define __Pyx_c_quot_float(a, b) ((a)/(b))
+ #define __Pyx_c_neg_float(a) (-(a))
+ #ifdef __cplusplus
+ #define __Pyx_c_is_zero_float(z) ((z)==(float)0)
+ #define __Pyx_c_conj_float(z) (::std::conj(z))
+ #if 1
+ #define __Pyx_c_abs_float(z) (::std::abs(z))
+ #define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
+ #endif
+ #else
+ #define __Pyx_c_is_zero_float(z) ((z)==0)
+ #define __Pyx_c_conj_float(z) (conjf(z))
+ #if 1
+ #define __Pyx_c_abs_float(z) (cabsf(z))
+ #define __Pyx_c_pow_float(a, b) (cpowf(a, b))
+ #endif
+ #endif
+#else
+ static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
+ static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
+ #if 1
+ static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
+ #endif
+#endif
+
+/* Arithmetic.proto */
+#if CYTHON_CCOMPLEX
+ #define __Pyx_c_eq_double(a, b) ((a)==(b))
+ #define __Pyx_c_sum_double(a, b) ((a)+(b))
+ #define __Pyx_c_diff_double(a, b) ((a)-(b))
+ #define __Pyx_c_prod_double(a, b) ((a)*(b))
+ #define __Pyx_c_quot_double(a, b) ((a)/(b))
+ #define __Pyx_c_neg_double(a) (-(a))
+ #ifdef __cplusplus
+ #define __Pyx_c_is_zero_double(z) ((z)==(double)0)
+ #define __Pyx_c_conj_double(z) (::std::conj(z))
+ #if 1
+ #define __Pyx_c_abs_double(z) (::std::abs(z))
+ #define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
+ #endif
+ #else
+ #define __Pyx_c_is_zero_double(z) ((z)==0)
+ #define __Pyx_c_conj_double(z) (conj(z))
+ #if 1
+ #define __Pyx_c_abs_double(z) (cabs(z))
+ #define __Pyx_c_pow_double(a, b) (cpow(a, b))
+ #endif
+ #endif
+#else
+ static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
+ static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
+ #if 1
+ static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
+ #endif
+#endif
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'numpy' */
+
+/* Module declarations from 'numpy' */
+static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
+static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
+static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
+
+/* Module declarations from 'mmdet.ops.nms.soft_nms_cpu' */
+static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_max(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/
+static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_min(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/
+static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
+#define __Pyx_MODULE_NAME "mmdet.ops.nms.soft_nms_cpu"
+extern int __pyx_module_is_main_mmdet__ops__nms__soft_nms_cpu;
+int __pyx_module_is_main_mmdet__ops__nms__soft_nms_cpu = 0;
+
+/* Implementation of 'mmdet.ops.nms.soft_nms_cpu' */
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_RuntimeError;
+static PyObject *__pyx_builtin_ImportError;
+static const char __pyx_k_N[] = "N";
+static const char __pyx_k_i[] = "i";
+static const char __pyx_k_s[] = "s";
+static const char __pyx_k_ih[] = "ih";
+static const char __pyx_k_iw[] = "iw";
+static const char __pyx_k_np[] = "np";
+static const char __pyx_k_ov[] = "ov";
+static const char __pyx_k_ti[] = "ti";
+static const char __pyx_k_ts[] = "ts";
+static const char __pyx_k_ua[] = "ua";
+static const char __pyx_k_x1[] = "x1";
+static const char __pyx_k_x2[] = "x2";
+static const char __pyx_k_y1[] = "y1";
+static const char __pyx_k_y2[] = "y2";
+static const char __pyx_k_exp[] = "exp";
+static const char __pyx_k_pos[] = "pos";
+static const char __pyx_k_tx1[] = "tx1";
+static const char __pyx_k_tx2[] = "tx2";
+static const char __pyx_k_ty1[] = "ty1";
+static const char __pyx_k_ty2[] = "ty2";
+static const char __pyx_k_area[] = "area";
+static const char __pyx_k_copy[] = "copy";
+static const char __pyx_k_inds[] = "inds";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_boxes[] = "boxes";
+static const char __pyx_k_numpy[] = "numpy";
+static const char __pyx_k_range[] = "range";
+static const char __pyx_k_shape[] = "shape";
+static const char __pyx_k_sigma[] = "sigma";
+static const char __pyx_k_arange[] = "arange";
+static const char __pyx_k_import[] = "__import__";
+static const char __pyx_k_maxpos[] = "maxpos";
+static const char __pyx_k_method[] = "method";
+static const char __pyx_k_weight[] = "weight";
+static const char __pyx_k_iou_thr[] = "iou_thr";
+static const char __pyx_k_box_area[] = "box_area";
+static const char __pyx_k_boxes_in[] = "boxes_in";
+static const char __pyx_k_maxscore[] = "maxscore";
+static const char __pyx_k_min_score[] = "min_score";
+static const char __pyx_k_ValueError[] = "ValueError";
+static const char __pyx_k_ImportError[] = "ImportError";
+static const char __pyx_k_RuntimeError[] = "RuntimeError";
+static const char __pyx_k_soft_nms_cpu[] = "soft_nms_cpu";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_mmdet_ops_nms_soft_nms_cpu[] = "mmdet.ops.nms.soft_nms_cpu";
+static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
+static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
+static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
+static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
+static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
+static const char __pyx_k_mmdet_ops_nms_src_soft_nms_cpu_p[] = "mmdet/ops/nms/src/soft_nms_cpu.pyx";
+static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
+static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
+static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
+static PyObject *__pyx_n_s_ImportError;
+static PyObject *__pyx_n_s_N;
+static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
+static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_n_s_ValueError;
+static PyObject *__pyx_n_s_arange;
+static PyObject *__pyx_n_s_area;
+static PyObject *__pyx_n_s_box_area;
+static PyObject *__pyx_n_s_boxes;
+static PyObject *__pyx_n_s_boxes_in;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_copy;
+static PyObject *__pyx_n_s_exp;
+static PyObject *__pyx_n_s_i;
+static PyObject *__pyx_n_s_ih;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_inds;
+static PyObject *__pyx_n_s_iou_thr;
+static PyObject *__pyx_n_s_iw;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_maxpos;
+static PyObject *__pyx_n_s_maxscore;
+static PyObject *__pyx_n_s_method;
+static PyObject *__pyx_n_s_min_score;
+static PyObject *__pyx_n_s_mmdet_ops_nms_soft_nms_cpu;
+static PyObject *__pyx_kp_s_mmdet_ops_nms_src_soft_nms_cpu_p;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
+static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
+static PyObject *__pyx_n_s_np;
+static PyObject *__pyx_n_s_numpy;
+static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to;
+static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor;
+static PyObject *__pyx_n_s_ov;
+static PyObject *__pyx_n_s_pos;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_s;
+static PyObject *__pyx_n_s_shape;
+static PyObject *__pyx_n_s_sigma;
+static PyObject *__pyx_n_s_soft_nms_cpu;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_ti;
+static PyObject *__pyx_n_s_ts;
+static PyObject *__pyx_n_s_tx1;
+static PyObject *__pyx_n_s_tx2;
+static PyObject *__pyx_n_s_ty1;
+static PyObject *__pyx_n_s_ty2;
+static PyObject *__pyx_n_s_ua;
+static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
+static PyObject *__pyx_n_s_weight;
+static PyObject *__pyx_n_s_x1;
+static PyObject *__pyx_n_s_x2;
+static PyObject *__pyx_n_s_y1;
+static PyObject *__pyx_n_s_y2;
+static PyObject *__pyx_pf_5mmdet_3ops_3nms_12soft_nms_cpu_soft_nms_cpu(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes_in, float __pyx_v_iou_thr, unsigned int __pyx_v_method, float __pyx_v_sigma, float __pyx_v_min_score); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
+static PyObject *__pyx_int_0;
+static PyObject *__pyx_int_1;
+static PyObject *__pyx_int_2;
+static PyObject *__pyx_int_3;
+static PyObject *__pyx_int_4;
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_tuple__2;
+static PyObject *__pyx_tuple__3;
+static PyObject *__pyx_tuple__4;
+static PyObject *__pyx_tuple__5;
+static PyObject *__pyx_tuple__6;
+static PyObject *__pyx_tuple__7;
+static PyObject *__pyx_tuple__8;
+static PyObject *__pyx_codeobj__9;
+/* Late includes */
+
+/* "mmdet/ops/nms/src/soft_nms_cpu.pyx":15
+ *
+ *
+ * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
+ * return a if a >= b else b
+ *
+ */
+
+/* NOTE(review): Cython-GENERATED code -- do not hand-edit; regenerate from
+ * mmdet/ops/nms/src/soft_nms_cpu.pyx instead. Implements the `cdef inline
+ * max` helper: returns the larger of two np.float32 values (`a` when
+ * a >= b). The RefNanny calls are Cython's refcount-debugging scaffolding. */
+static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_max(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) {
+ __pyx_t_5numpy_float32_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __pyx_t_5numpy_float32_t __pyx_t_1;
+ __Pyx_RefNannySetupContext("max", 0);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":16
+ *
+ * cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
+ * return a if a >= b else b # <<<<<<<<<<<<<<
+ *
+ * cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
+ */
+ /* C expansion of `a if a >= b else b` from the .pyx line above. */
+ if (((__pyx_v_a >= __pyx_v_b) != 0)) {
+ __pyx_t_1 = __pyx_v_a;
+ } else {
+ __pyx_t_1 = __pyx_v_b;
+ }
+ __pyx_r = __pyx_t_1;
+ goto __pyx_L0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":15
+ *
+ *
+ * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
+ * return a if a >= b else b
+ *
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "mmdet/ops/nms/src/soft_nms_cpu.pyx":18
+ * return a if a >= b else b
+ *
+ * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
+ * return a if a <= b else b
+ *
+ */
+
+/* NOTE(review): Cython-GENERATED code -- do not hand-edit; regenerate from
+ * mmdet/ops/nms/src/soft_nms_cpu.pyx instead. Implements the `cdef inline
+ * min` helper: returns the smaller of two np.float32 values (`a` when
+ * a <= b). Mirror image of the generated `max` helper above. */
+static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_min(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) {
+ __pyx_t_5numpy_float32_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __pyx_t_5numpy_float32_t __pyx_t_1;
+ __Pyx_RefNannySetupContext("min", 0);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":19
+ *
+ * cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
+ * return a if a <= b else b # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /* C expansion of `a if a <= b else b` from the .pyx line above. */
+ if (((__pyx_v_a <= __pyx_v_b) != 0)) {
+ __pyx_t_1 = __pyx_v_a;
+ } else {
+ __pyx_t_1 = __pyx_v_b;
+ }
+ __pyx_r = __pyx_t_1;
+ goto __pyx_L0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":18
+ * return a if a >= b else b
+ *
+ * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
+ * return a if a <= b else b
+ *
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "mmdet/ops/nms/src/soft_nms_cpu.pyx":22
+ *
+ *
+ * def soft_nms_cpu( # <<<<<<<<<<<<<<
+ * np.ndarray[float, ndim=2] boxes_in,
+ * float iou_thr,
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_5mmdet_3ops_3nms_12soft_nms_cpu_1soft_nms_cpu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_5mmdet_3ops_3nms_12soft_nms_cpu_1soft_nms_cpu = {"soft_nms_cpu", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5mmdet_3ops_3nms_12soft_nms_cpu_1soft_nms_cpu, METH_VARARGS|METH_KEYWORDS, 0};
+/* NOTE(review): Cython-GENERATED argument-unpacking wrapper for
+ * soft_nms_cpu() -- do not hand-edit; regenerate from
+ * mmdet/ops/nms/src/soft_nms_cpu.pyx instead.
+ *
+ * Python signature (from the parsing below):
+ *   soft_nms_cpu(boxes_in, iou_thr, method=1, sigma=0.5, min_score=0.001)
+ * boxes_in and iou_thr are required (2..5 args accepted); boxes_in is
+ * type-checked as a numpy ndarray before delegating to the
+ * __pyx_pf_..._soft_nms_cpu implementation. On any parse/convert failure
+ * control reaches __pyx_L3_error, a traceback frame is added, and NULL is
+ * returned. The CYTHON_FALLTHROUGH switch cases are intentional. */
+static PyObject *__pyx_pw_5mmdet_3ops_3nms_12soft_nms_cpu_1soft_nms_cpu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyArrayObject *__pyx_v_boxes_in = 0;
+ float __pyx_v_iou_thr;
+ unsigned int __pyx_v_method;
+ float __pyx_v_sigma;
+ float __pyx_v_min_score;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("soft_nms_cpu (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes_in,&__pyx_n_s_iou_thr,&__pyx_n_s_method,&__pyx_n_s_sigma,&__pyx_n_s_min_score,0};
+ PyObject* values[5] = {0,0,0,0,0};
+ /* Keyword-call path: first harvest up to 5 positional args, then fill
+  * the remaining slots from the keyword dict (falling through from the
+  * number of positionals received). */
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ /* Required args raise if absent; optional ones are only looked up. */
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_boxes_in)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_iou_thr)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("soft_nms_cpu", 0, 2, 5, 1); __PYX_ERR(0, 22, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_method);
+ if (value) { values[2] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_min_score);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ }
+ /* Any keywords still unconsumed are unexpected/duplicate -> error. */
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "soft_nms_cpu") < 0)) __PYX_ERR(0, 22, __pyx_L3_error)
+ }
+ } else {
+ /* Positional-only path: 2..5 args accepted, otherwise arity error. */
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ /* Convert collected PyObjects to C values; defaults applied for any
+  * optional slot left NULL (method=1, sigma=0.5, min_score=0.001). */
+ __pyx_v_boxes_in = ((PyArrayObject *)values[0]);
+ __pyx_v_iou_thr = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_iou_thr == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 24, __pyx_L3_error)
+ if (values[2]) {
+ __pyx_v_method = __Pyx_PyInt_As_unsigned_int(values[2]); if (unlikely((__pyx_v_method == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error)
+ } else {
+ __pyx_v_method = ((unsigned int)1);
+ }
+ if (values[3]) {
+ __pyx_v_sigma = __pyx_PyFloat_AsFloat(values[3]); if (unlikely((__pyx_v_sigma == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error)
+ } else {
+ __pyx_v_sigma = ((float)0.5);
+ }
+ if (values[4]) {
+ __pyx_v_min_score = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_min_score == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error)
+ } else {
+ __pyx_v_min_score = ((float)0.001);
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("soft_nms_cpu", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 22, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("mmdet.ops.nms.soft_nms_cpu.soft_nms_cpu", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ /* boxes_in must be a numpy ndarray (NULL allowed flag = 1 per Cython). */
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_boxes_in), __pyx_ptype_5numpy_ndarray, 1, "boxes_in", 0))) __PYX_ERR(0, 23, __pyx_L1_error)
+ __pyx_r = __pyx_pf_5mmdet_3ops_3nms_12soft_nms_cpu_soft_nms_cpu(__pyx_self, __pyx_v_boxes_in, __pyx_v_iou_thr, __pyx_v_method, __pyx_v_sigma, __pyx_v_min_score);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_5mmdet_3ops_3nms_12soft_nms_cpu_soft_nms_cpu(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes_in, float __pyx_v_iou_thr, unsigned int __pyx_v_method, float __pyx_v_sigma, float __pyx_v_min_score) {
+ PyObject *__pyx_v_boxes = NULL;
+ int __pyx_v_N;
+ float __pyx_v_iw;
+ float __pyx_v_ih;
+ float __pyx_v_ua;
+ int __pyx_v_pos;
+ float __pyx_v_maxscore;
+ int __pyx_v_maxpos;
+ float __pyx_v_x1;
+ float __pyx_v_x2;
+ float __pyx_v_y1;
+ float __pyx_v_y2;
+ float __pyx_v_tx1;
+ float __pyx_v_tx2;
+ float __pyx_v_ty1;
+ float __pyx_v_ty2;
+ float __pyx_v_ts;
+ float __pyx_v_area;
+ float __pyx_v_weight;
+ float __pyx_v_ov;
+ PyObject *__pyx_v_inds = NULL;
+ PyObject *__pyx_v_i = NULL;
+ PyObject *__pyx_v_ti = NULL;
+ CYTHON_UNUSED PyObject *__pyx_v_s = NULL;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_boxes_in;
+ __Pyx_Buffer __pyx_pybuffer_boxes_in;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ Py_ssize_t __pyx_t_6;
+ PyObject *(*__pyx_t_7)(PyObject *);
+ float __pyx_t_8;
+ int __pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ long __pyx_t_11;
+ __Pyx_RefNannySetupContext("soft_nms_cpu", 0);
+ __pyx_pybuffer_boxes_in.pybuffer.buf = NULL;
+ __pyx_pybuffer_boxes_in.refcount = 0;
+ __pyx_pybuffernd_boxes_in.data = NULL;
+ __pyx_pybuffernd_boxes_in.rcbuffer = &__pyx_pybuffer_boxes_in;
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer, (PyObject*)__pyx_v_boxes_in, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 22, __pyx_L1_error)
+ }
+ __pyx_pybuffernd_boxes_in.diminfo[0].strides = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_boxes_in.diminfo[0].shape = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_boxes_in.diminfo[1].strides = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_boxes_in.diminfo[1].shape = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.shape[1];
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":29
+ * float min_score=0.001,
+ * ):
+ * boxes = boxes_in.copy() # <<<<<<<<<<<<<<
+ * cdef int N = boxes.shape[0]
+ * cdef float iw, ih, box_area
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_boxes_in), __pyx_n_s_copy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_boxes = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":30
+ * ):
+ * boxes = boxes_in.copy()
+ * cdef int N = boxes.shape[0] # <<<<<<<<<<<<<<
+ * cdef float iw, ih, box_area
+ * cdef float ua
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_boxes, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_N = __pyx_t_4;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":33
+ * cdef float iw, ih, box_area
+ * cdef float ua
+ * cdef int pos = 0 # <<<<<<<<<<<<<<
+ * cdef float maxscore = 0
+ * cdef int maxpos = 0
+ */
+ __pyx_v_pos = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":34
+ * cdef float ua
+ * cdef int pos = 0
+ * cdef float maxscore = 0 # <<<<<<<<<<<<<<
+ * cdef int maxpos = 0
+ * cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov
+ */
+ __pyx_v_maxscore = 0.0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":35
+ * cdef int pos = 0
+ * cdef float maxscore = 0
+ * cdef int maxpos = 0 # <<<<<<<<<<<<<<
+ * cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov
+ * inds = np.arange(N)
+ */
+ __pyx_v_maxpos = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":37
+ * cdef int maxpos = 0
+ * cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov
+ * inds = np.arange(N) # <<<<<<<<<<<<<<
+ *
+ * for i in range(N):
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_arange); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_5, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_inds = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":39
+ * inds = np.arange(N)
+ *
+ * for i in range(N): # <<<<<<<<<<<<<<
+ * maxscore = boxes[i, 4]
+ * maxpos = i
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) {
+ __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_6 = 0;
+ __pyx_t_7 = NULL;
+ } else {
+ __pyx_t_6 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_7 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 39, __pyx_L1_error)
+ }
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ for (;;) {
+ if (likely(!__pyx_t_7)) {
+ if (likely(PyList_CheckExact(__pyx_t_2))) {
+ if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_2)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_3); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 39, __pyx_L1_error)
+ #else
+ __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ #endif
+ } else {
+ if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_3); __pyx_t_6++; if (unlikely(0 < 0)) __PYX_ERR(0, 39, __pyx_L1_error)
+ #else
+ __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ #endif
+ }
+ } else {
+ __pyx_t_3 = __pyx_t_7(__pyx_t_2);
+ if (unlikely(!__pyx_t_3)) {
+ PyObject* exc_type = PyErr_Occurred();
+ if (exc_type) {
+ if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+ else __PYX_ERR(0, 39, __pyx_L1_error)
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_3);
+ }
+ __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":40
+ *
+ * for i in range(N):
+ * maxscore = boxes[i, 4] # <<<<<<<<<<<<<<
+ * maxpos = i
+ *
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_maxscore = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":41
+ * for i in range(N):
+ * maxscore = boxes[i, 4]
+ * maxpos = i # <<<<<<<<<<<<<<
+ *
+ * tx1 = boxes[i, 0]
+ */
+ __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_v_i); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L1_error)
+ __pyx_v_maxpos = __pyx_t_4;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":43
+ * maxpos = i
+ *
+ * tx1 = boxes[i, 0] # <<<<<<<<<<<<<<
+ * ty1 = boxes[i, 1]
+ * tx2 = boxes[i, 2]
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0);
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 43, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_tx1 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":44
+ *
+ * tx1 = boxes[i, 0]
+ * ty1 = boxes[i, 1] # <<<<<<<<<<<<<<
+ * tx2 = boxes[i, 2]
+ * ty2 = boxes[i, 3]
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 44, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_ty1 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":45
+ * tx1 = boxes[i, 0]
+ * ty1 = boxes[i, 1]
+ * tx2 = boxes[i, 2] # <<<<<<<<<<<<<<
+ * ty2 = boxes[i, 3]
+ * ts = boxes[i, 4]
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_2);
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 45, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_tx2 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":46
+ * ty1 = boxes[i, 1]
+ * tx2 = boxes[i, 2]
+ * ty2 = boxes[i, 3] # <<<<<<<<<<<<<<
+ * ts = boxes[i, 4]
+ * ti = inds[i]
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3);
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_ty2 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":47
+ * tx2 = boxes[i, 2]
+ * ty2 = boxes[i, 3]
+ * ts = boxes[i, 4] # <<<<<<<<<<<<<<
+ * ti = inds[i]
+ *
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 47, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 47, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 47, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_ts = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":48
+ * ty2 = boxes[i, 3]
+ * ts = boxes[i, 4]
+ * ti = inds[i] # <<<<<<<<<<<<<<
+ *
+ * pos = i + 1
+ */
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_inds, __pyx_v_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 48, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_XDECREF_SET(__pyx_v_ti, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":50
+ * ti = inds[i]
+ *
+ * pos = i + 1 # <<<<<<<<<<<<<<
+ * # get max box
+ * while pos < N:
+ */
+ __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 50, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_pos = __pyx_t_4;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":52
+ * pos = i + 1
+ * # get max box
+ * while pos < N: # <<<<<<<<<<<<<<
+ * if maxscore < boxes[pos, 4]:
+ * maxscore = boxes[pos, 4]
+ */
+ while (1) {
+ __pyx_t_9 = ((__pyx_v_pos < __pyx_v_N) != 0);
+ if (!__pyx_t_9) break;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":53
+ * # get max box
+ * while pos < N:
+ * if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<<
+ * maxscore = boxes[pos, 4]
+ * maxpos = pos
+ */
+ __pyx_t_3 = PyFloat_FromDouble(__pyx_v_maxscore); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_t_1, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (__pyx_t_9) {
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":54
+ * while pos < N:
+ * if maxscore < boxes[pos, 4]:
+ * maxscore = boxes[pos, 4] # <<<<<<<<<<<<<<
+ * maxpos = pos
+ * pos = pos + 1
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 54, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 54, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 54, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_v_maxscore = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":55
+ * if maxscore < boxes[pos, 4]:
+ * maxscore = boxes[pos, 4]
+ * maxpos = pos # <<<<<<<<<<<<<<
+ * pos = pos + 1
+ *
+ */
+ __pyx_v_maxpos = __pyx_v_pos;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":53
+ * # get max box
+ * while pos < N:
+ * if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<<
+ * maxscore = boxes[pos, 4]
+ * maxpos = pos
+ */
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":56
+ * maxscore = boxes[pos, 4]
+ * maxpos = pos
+ * pos = pos + 1 # <<<<<<<<<<<<<<
+ *
+ * # add max box as a detection
+ */
+ __pyx_v_pos = (__pyx_v_pos + 1);
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":59
+ *
+ * # add max box as a detection
+ * boxes[i, 0] = boxes[maxpos, 0] # <<<<<<<<<<<<<<
+ * boxes[i, 1] = boxes[maxpos, 1]
+ * boxes[i, 2] = boxes[maxpos, 2]
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0);
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":60
+ * # add max box as a detection
+ * boxes[i, 0] = boxes[maxpos, 0]
+ * boxes[i, 1] = boxes[maxpos, 1] # <<<<<<<<<<<<<<
+ * boxes[i, 2] = boxes[maxpos, 2]
+ * boxes[i, 3] = boxes[maxpos, 3]
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_1);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_1);
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 60, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":61
+ * boxes[i, 0] = boxes[maxpos, 0]
+ * boxes[i, 1] = boxes[maxpos, 1]
+ * boxes[i, 2] = boxes[maxpos, 2] # <<<<<<<<<<<<<<
+ * boxes[i, 3] = boxes[maxpos, 3]
+ * boxes[i, 4] = boxes[maxpos, 4]
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_2);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_2);
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 61, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":62
+ * boxes[i, 1] = boxes[maxpos, 1]
+ * boxes[i, 2] = boxes[maxpos, 2]
+ * boxes[i, 3] = boxes[maxpos, 3] # <<<<<<<<<<<<<<
+ * boxes[i, 4] = boxes[maxpos, 4]
+ * inds[i] = inds[maxpos]
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_3);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_3);
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":63
+ * boxes[i, 2] = boxes[maxpos, 2]
+ * boxes[i, 3] = boxes[maxpos, 3]
+ * boxes[i, 4] = boxes[maxpos, 4] # <<<<<<<<<<<<<<
+ * inds[i] = inds[maxpos]
+ *
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 63, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":64
+ * boxes[i, 3] = boxes[maxpos, 3]
+ * boxes[i, 4] = boxes[maxpos, 4]
+ * inds[i] = inds[maxpos] # <<<<<<<<<<<<<<
+ *
+ * # swap ith box with position of max box
+ */
+ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_inds, __pyx_v_maxpos, int, 1, __Pyx_PyInt_From_int, 0, 1, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 64, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ if (unlikely(PyObject_SetItem(__pyx_v_inds, __pyx_v_i, __pyx_t_5) < 0)) __PYX_ERR(0, 64, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":67
+ *
+ * # swap ith box with position of max box
+ * boxes[maxpos, 0] = tx1 # <<<<<<<<<<<<<<
+ * boxes[maxpos, 1] = ty1
+ * boxes[maxpos, 2] = tx2
+ */
+ __pyx_t_5 = PyFloat_FromDouble(__pyx_v_tx1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
+ __pyx_t_1 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_5) < 0)) __PYX_ERR(0, 67, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":68
+ * # swap ith box with position of max box
+ * boxes[maxpos, 0] = tx1
+ * boxes[maxpos, 1] = ty1 # <<<<<<<<<<<<<<
+ * boxes[maxpos, 2] = tx2
+ * boxes[maxpos, 3] = ty2
+ */
+ __pyx_t_5 = PyFloat_FromDouble(__pyx_v_ty1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_1);
+ __pyx_t_3 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 68, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":69
+ * boxes[maxpos, 0] = tx1
+ * boxes[maxpos, 1] = ty1
+ * boxes[maxpos, 2] = tx2 # <<<<<<<<<<<<<<
+ * boxes[maxpos, 3] = ty2
+ * boxes[maxpos, 4] = ts
+ */
+ __pyx_t_5 = PyFloat_FromDouble(__pyx_v_tx2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_2);
+ __pyx_t_1 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_5) < 0)) __PYX_ERR(0, 69, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":70
+ * boxes[maxpos, 1] = ty1
+ * boxes[maxpos, 2] = tx2
+ * boxes[maxpos, 3] = ty2 # <<<<<<<<<<<<<<
+ * boxes[maxpos, 4] = ts
+ * inds[maxpos] = ti
+ */
+ __pyx_t_5 = PyFloat_FromDouble(__pyx_v_ty2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_3);
+ __pyx_t_3 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_5) < 0)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":71
+ * boxes[maxpos, 2] = tx2
+ * boxes[maxpos, 3] = ty2
+ * boxes[maxpos, 4] = ts # <<<<<<<<<<<<<<
+ * inds[maxpos] = ti
+ *
+ */
+ __pyx_t_5 = PyFloat_FromDouble(__pyx_v_ts); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
+ __pyx_t_1 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_5) < 0)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":72
+ * boxes[maxpos, 3] = ty2
+ * boxes[maxpos, 4] = ts
+ * inds[maxpos] = ti # <<<<<<<<<<<<<<
+ *
+ * tx1 = boxes[i, 0]
+ */
+ if (unlikely(__Pyx_SetItemInt(__pyx_v_inds, __pyx_v_maxpos, __pyx_v_ti, int, 1, __Pyx_PyInt_From_int, 0, 1, 0) < 0)) __PYX_ERR(0, 72, __pyx_L1_error)
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":74
+ * inds[maxpos] = ti
+ *
+ * tx1 = boxes[i, 0] # <<<<<<<<<<<<<<
+ * ty1 = boxes[i, 1]
+ * tx2 = boxes[i, 2]
+ */
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_tx1 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":75
+ *
+ * tx1 = boxes[i, 0]
+ * ty1 = boxes[i, 1] # <<<<<<<<<<<<<<
+ * tx2 = boxes[i, 2]
+ * ty2 = boxes[i, 3]
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 75, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_v_ty1 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":76
+ * tx1 = boxes[i, 0]
+ * ty1 = boxes[i, 1]
+ * tx2 = boxes[i, 2] # <<<<<<<<<<<<<<
+ * ty2 = boxes[i, 3]
+ * ts = boxes[i, 4]
+ */
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_2);
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 76, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_tx2 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":77
+ * ty1 = boxes[i, 1]
+ * tx2 = boxes[i, 2]
+ * ty2 = boxes[i, 3] # <<<<<<<<<<<<<<
+ * ts = boxes[i, 4]
+ *
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3);
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 77, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 77, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_v_ty2 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":78
+ * tx2 = boxes[i, 2]
+ * ty2 = boxes[i, 3]
+ * ts = boxes[i, 4] # <<<<<<<<<<<<<<
+ *
+ * pos = i + 1
+ */
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 78, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_i);
+ __Pyx_GIVEREF(__pyx_v_i);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4);
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 78, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_ts = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":80
+ * ts = boxes[i, 4]
+ *
+ * pos = i + 1 # <<<<<<<<<<<<<<
+ * # NMS iterations, note that N changes if detection boxes fall below
+ * # threshold
+ */
+ __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_pos = __pyx_t_4;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":83
+ * # NMS iterations, note that N changes if detection boxes fall below
+ * # threshold
+ * while pos < N: # <<<<<<<<<<<<<<
+ * x1 = boxes[pos, 0]
+ * y1 = boxes[pos, 1]
+ */
+ while (1) {
+ __pyx_t_9 = ((__pyx_v_pos < __pyx_v_N) != 0);
+ if (!__pyx_t_9) break;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":84
+ * # threshold
+ * while pos < N:
+ * x1 = boxes[pos, 0] # <<<<<<<<<<<<<<
+ * y1 = boxes[pos, 1]
+ * x2 = boxes[pos, 2]
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 84, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 84, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 84, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_x1 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":85
+ * while pos < N:
+ * x1 = boxes[pos, 0]
+ * y1 = boxes[pos, 1] # <<<<<<<<<<<<<<
+ * x2 = boxes[pos, 2]
+ * y2 = boxes[pos, 3]
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 85, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_1);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_y1 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":86
+ * x1 = boxes[pos, 0]
+ * y1 = boxes[pos, 1]
+ * x2 = boxes[pos, 2] # <<<<<<<<<<<<<<
+ * y2 = boxes[pos, 3]
+ * s = boxes[pos, 4]
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_2);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_x2 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":87
+ * y1 = boxes[pos, 1]
+ * x2 = boxes[pos, 2]
+ * y2 = boxes[pos, 3] # <<<<<<<<<<<<<<
+ * s = boxes[pos, 4]
+ *
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 87, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 87, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_y2 = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":88
+ * x2 = boxes[pos, 2]
+ * y2 = boxes[pos, 3]
+ * s = boxes[pos, 4] # <<<<<<<<<<<<<<
+ *
+ * area = (x2 - x1 + 1) * (y2 - y1 + 1)
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 88, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":90
+ * s = boxes[pos, 4]
+ *
+ * area = (x2 - x1 + 1) * (y2 - y1 + 1) # <<<<<<<<<<<<<<
+ * iw = (min(tx2, x2) - max(tx1, x1) + 1)
+ * if iw > 0:
+ */
+ __pyx_v_area = (((__pyx_v_x2 - __pyx_v_x1) + 1.0) * ((__pyx_v_y2 - __pyx_v_y1) + 1.0));
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":91
+ *
+ * area = (x2 - x1 + 1) * (y2 - y1 + 1)
+ * iw = (min(tx2, x2) - max(tx1, x1) + 1) # <<<<<<<<<<<<<<
+ * if iw > 0:
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1)
+ */
+ __pyx_v_iw = ((__pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_min(__pyx_v_tx2, __pyx_v_x2) - __pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_max(__pyx_v_tx1, __pyx_v_x1)) + 1.0);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":92
+ * area = (x2 - x1 + 1) * (y2 - y1 + 1)
+ * iw = (min(tx2, x2) - max(tx1, x1) + 1)
+ * if iw > 0: # <<<<<<<<<<<<<<
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1)
+ * if ih > 0:
+ */
+ __pyx_t_9 = ((__pyx_v_iw > 0.0) != 0);
+ if (__pyx_t_9) {
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":93
+ * iw = (min(tx2, x2) - max(tx1, x1) + 1)
+ * if iw > 0:
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1) # <<<<<<<<<<<<<<
+ * if ih > 0:
+ * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+ */
+ __pyx_v_ih = ((__pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_min(__pyx_v_ty2, __pyx_v_y2) - __pyx_f_5mmdet_3ops_3nms_12soft_nms_cpu_max(__pyx_v_ty1, __pyx_v_y1)) + 1.0);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":94
+ * if iw > 0:
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1)
+ * if ih > 0: # <<<<<<<<<<<<<<
+ * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+ * ov = iw * ih / ua # iou between max box and detection box
+ */
+ __pyx_t_9 = ((__pyx_v_ih > 0.0) != 0);
+ if (__pyx_t_9) {
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":95
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1)
+ * if ih > 0:
+ * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) # <<<<<<<<<<<<<<
+ * ov = iw * ih / ua # iou between max box and detection box
+ *
+ */
+ __pyx_v_ua = ((double)(((((__pyx_v_tx2 - __pyx_v_tx1) + 1.0) * ((__pyx_v_ty2 - __pyx_v_ty1) + 1.0)) + __pyx_v_area) - (__pyx_v_iw * __pyx_v_ih)));
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":96
+ * if ih > 0:
+ * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+ * ov = iw * ih / ua # iou between max box and detection box # <<<<<<<<<<<<<<
+ *
+ * if method == 1: # linear
+ */
+ __pyx_t_8 = (__pyx_v_iw * __pyx_v_ih);
+ if (unlikely(__pyx_v_ua == 0)) {
+ PyErr_SetString(PyExc_ZeroDivisionError, "float division");
+ __PYX_ERR(0, 96, __pyx_L1_error)
+ }
+ __pyx_v_ov = (__pyx_t_8 / __pyx_v_ua);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":98
+ * ov = iw * ih / ua # iou between max box and detection box
+ *
+ * if method == 1: # linear # <<<<<<<<<<<<<<
+ * if ov > iou_thr:
+ * weight = 1 - ov
+ */
+ switch (__pyx_v_method) {
+ case 1:
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":99
+ *
+ * if method == 1: # linear
+ * if ov > iou_thr: # <<<<<<<<<<<<<<
+ * weight = 1 - ov
+ * else:
+ */
+ __pyx_t_9 = ((__pyx_v_ov > __pyx_v_iou_thr) != 0);
+ if (__pyx_t_9) {
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":100
+ * if method == 1: # linear
+ * if ov > iou_thr:
+ * weight = 1 - ov # <<<<<<<<<<<<<<
+ * else:
+ * weight = 1
+ */
+ __pyx_v_weight = (1.0 - __pyx_v_ov);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":99
+ *
+ * if method == 1: # linear
+ * if ov > iou_thr: # <<<<<<<<<<<<<<
+ * weight = 1 - ov
+ * else:
+ */
+ goto __pyx_L12;
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":102
+ * weight = 1 - ov
+ * else:
+ * weight = 1 # <<<<<<<<<<<<<<
+ * elif method == 2: # gaussian
+ * weight = np.exp(-(ov * ov) / sigma)
+ */
+ /*else*/ {
+ __pyx_v_weight = 1.0;
+ }
+ __pyx_L12:;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":98
+ * ov = iw * ih / ua # iou between max box and detection box
+ *
+ * if method == 1: # linear # <<<<<<<<<<<<<<
+ * if ov > iou_thr:
+ * weight = 1 - ov
+ */
+ break;
+ case 2:
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":104
+ * weight = 1
+ * elif method == 2: # gaussian
+ * weight = np.exp(-(ov * ov) / sigma) # <<<<<<<<<<<<<<
+ * else: # original NMS
+ * if ov > iou_thr:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_8 = (-(__pyx_v_ov * __pyx_v_ov));
+ if (unlikely(__pyx_v_sigma == 0)) {
+ PyErr_SetString(PyExc_ZeroDivisionError, "float division");
+ __PYX_ERR(0, 104, __pyx_L1_error)
+ }
+ __pyx_t_5 = PyFloat_FromDouble((__pyx_t_8 / __pyx_v_sigma)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_10 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
+ __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_1);
+ if (likely(__pyx_t_10)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
+ __Pyx_INCREF(__pyx_t_10);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_1, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_10, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_8 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_8 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_weight = __pyx_t_8;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":103
+ * else:
+ * weight = 1
+ * elif method == 2: # gaussian # <<<<<<<<<<<<<<
+ * weight = np.exp(-(ov * ov) / sigma)
+ * else: # original NMS
+ */
+ break;
+ default:
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":106
+ * weight = np.exp(-(ov * ov) / sigma)
+ * else: # original NMS
+ * if ov > iou_thr: # <<<<<<<<<<<<<<
+ * weight = 0
+ * else:
+ */
+ __pyx_t_9 = ((__pyx_v_ov > __pyx_v_iou_thr) != 0);
+ if (__pyx_t_9) {
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":107
+ * else: # original NMS
+ * if ov > iou_thr:
+ * weight = 0 # <<<<<<<<<<<<<<
+ * else:
+ * weight = 1
+ */
+ __pyx_v_weight = 0.0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":106
+ * weight = np.exp(-(ov * ov) / sigma)
+ * else: # original NMS
+ * if ov > iou_thr: # <<<<<<<<<<<<<<
+ * weight = 0
+ * else:
+ */
+ goto __pyx_L13;
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":109
+ * weight = 0
+ * else:
+ * weight = 1 # <<<<<<<<<<<<<<
+ *
+ * boxes[pos, 4] = weight * boxes[pos, 4]
+ */
+ /*else*/ {
+ __pyx_v_weight = 1.0;
+ }
+ __pyx_L13:;
+ break;
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":111
+ * weight = 1
+ *
+ * boxes[pos, 4] = weight * boxes[pos, 4] # <<<<<<<<<<<<<<
+ *
+ * # if box score falls below threshold, discard the box by
+ */
+ __pyx_t_3 = PyFloat_FromDouble(__pyx_v_weight); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = PyNumber_Multiply(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
+ __pyx_t_1 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_5) < 0)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":115
+ * # if box score falls below threshold, discard the box by
+ * # swapping with last box update N
+ * if boxes[pos, 4] < min_score: # <<<<<<<<<<<<<<
+ * boxes[pos, 0] = boxes[N-1, 0]
+ * boxes[pos, 1] = boxes[N-1, 1]
+ */
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
+ __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyFloat_FromDouble(__pyx_v_min_score); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_5, __pyx_t_3, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 115, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 115, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_9) {
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":116
+ * # swapping with last box update N
+ * if boxes[pos, 4] < min_score:
+ * boxes[pos, 0] = boxes[N-1, 0] # <<<<<<<<<<<<<<
+ * boxes[pos, 1] = boxes[N-1, 1]
+ * boxes[pos, 2] = boxes[N-1, 2]
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 116, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_0);
+ __Pyx_GIVEREF(__pyx_int_0);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0);
+ __pyx_t_3 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_5, __pyx_t_1) < 0)) __PYX_ERR(0, 116, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":117
+ * if boxes[pos, 4] < min_score:
+ * boxes[pos, 0] = boxes[N-1, 0]
+ * boxes[pos, 1] = boxes[N-1, 1] # <<<<<<<<<<<<<<
+ * boxes[pos, 2] = boxes[N-1, 2]
+ * boxes[pos, 3] = boxes[N-1, 3]
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 117, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 117, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 117, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 117, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_1);
+ __Pyx_GIVEREF(__pyx_int_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
+ __pyx_t_5 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 117, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":118
+ * boxes[pos, 0] = boxes[N-1, 0]
+ * boxes[pos, 1] = boxes[N-1, 1]
+ * boxes[pos, 2] = boxes[N-1, 2] # <<<<<<<<<<<<<<
+ * boxes[pos, 3] = boxes[N-1, 3]
+ * boxes[pos, 4] = boxes[N-1, 4]
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 118, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_2);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 118, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 118, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_2);
+ __Pyx_GIVEREF(__pyx_int_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_2);
+ __pyx_t_3 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_5, __pyx_t_1) < 0)) __PYX_ERR(0, 118, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":119
+ * boxes[pos, 1] = boxes[N-1, 1]
+ * boxes[pos, 2] = boxes[N-1, 2]
+ * boxes[pos, 3] = boxes[N-1, 3] # <<<<<<<<<<<<<<
+ * boxes[pos, 4] = boxes[N-1, 4]
+ * inds[pos] = inds[N - 1]
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 119, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 119, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_3);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 119, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 119, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5);
+ __Pyx_INCREF(__pyx_int_3);
+ __Pyx_GIVEREF(__pyx_int_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3);
+ __pyx_t_5 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 119, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":120
+ * boxes[pos, 2] = boxes[N-1, 2]
+ * boxes[pos, 3] = boxes[N-1, 3]
+ * boxes[pos, 4] = boxes[N-1, 4] # <<<<<<<<<<<<<<
+ * inds[pos] = inds[N - 1]
+ * N = N - 1
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 120, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 120, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 120, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
+ __Pyx_INCREF(__pyx_int_4);
+ __Pyx_GIVEREF(__pyx_int_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4);
+ __pyx_t_3 = 0;
+ if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_5, __pyx_t_1) < 0)) __PYX_ERR(0, 120, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":121
+ * boxes[pos, 3] = boxes[N-1, 3]
+ * boxes[pos, 4] = boxes[N-1, 4]
+ * inds[pos] = inds[N - 1] # <<<<<<<<<<<<<<
+ * N = N - 1
+ * pos = pos - 1
+ */
+ __pyx_t_11 = (__pyx_v_N - 1);
+ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_inds, __pyx_t_11, long, 1, __Pyx_PyInt_From_long, 0, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (unlikely(__Pyx_SetItemInt(__pyx_v_inds, __pyx_v_pos, __pyx_t_1, int, 1, __Pyx_PyInt_From_int, 0, 1, 0) < 0)) __PYX_ERR(0, 121, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":122
+ * boxes[pos, 4] = boxes[N-1, 4]
+ * inds[pos] = inds[N - 1]
+ * N = N - 1 # <<<<<<<<<<<<<<
+ * pos = pos - 1
+ *
+ */
+ __pyx_v_N = (__pyx_v_N - 1);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":123
+ * inds[pos] = inds[N - 1]
+ * N = N - 1
+ * pos = pos - 1 # <<<<<<<<<<<<<<
+ *
+ * pos = pos + 1
+ */
+ __pyx_v_pos = (__pyx_v_pos - 1);
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":115
+ * # if box score falls below threshold, discard the box by
+ * # swapping with last box update N
+ * if boxes[pos, 4] < min_score: # <<<<<<<<<<<<<<
+ * boxes[pos, 0] = boxes[N-1, 0]
+ * boxes[pos, 1] = boxes[N-1, 1]
+ */
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":94
+ * if iw > 0:
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1)
+ * if ih > 0: # <<<<<<<<<<<<<<
+ * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+ * ov = iw * ih / ua # iou between max box and detection box
+ */
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":92
+ * area = (x2 - x1 + 1) * (y2 - y1 + 1)
+ * iw = (min(tx2, x2) - max(tx1, x1) + 1)
+ * if iw > 0: # <<<<<<<<<<<<<<
+ * ih = (min(ty2, y2) - max(ty1, y1) + 1)
+ * if ih > 0:
+ */
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":125
+ * pos = pos - 1
+ *
+ * pos = pos + 1 # <<<<<<<<<<<<<<
+ *
+ * return boxes[:N], inds[:N]
+ */
+ __pyx_v_pos = (__pyx_v_pos + 1);
+ }
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":39
+ * inds = np.arange(N)
+ *
+ * for i in range(N): # <<<<<<<<<<<<<<
+ * maxscore = boxes[i, 4]
+ * maxpos = i
+ */
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":127
+ * pos = pos + 1
+ *
+ * return boxes[:N], inds[:N] # <<<<<<<<<<<<<<
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_boxes, 0, __pyx_v_N, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_inds, 0, __pyx_v_N, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 127, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
+ __pyx_t_2 = 0;
+ __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+
+ /* "mmdet/ops/nms/src/soft_nms_cpu.pyx":22
+ *
+ *
+ * def soft_nms_cpu( # <<<<<<<<<<<<<<
+ * np.ndarray[float, ndim=2] boxes_in,
+ * float iou_thr,
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_10);
+ { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer);
+ __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+ __Pyx_AddTraceback("mmdet.ops.nms.soft_nms_cpu.soft_nms_cpu", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ goto __pyx_L2;
+ __pyx_L0:;
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer);
+ __pyx_L2:;
+ __Pyx_XDECREF(__pyx_v_boxes);
+ __Pyx_XDECREF(__pyx_v_inds);
+ __Pyx_XDECREF(__pyx_v_i);
+ __Pyx_XDECREF(__pyx_v_ti);
+ __Pyx_XDECREF(__pyx_v_s);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+ * # experimental exception made for __getbuffer__ and __releasebuffer__
+ * # -- the details of this may change.
+ * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
+ * # This implementation of getbuffer is geared towards Cython
+ * # requirements, and does not yet fulfill the PEP.
+ */
+
+ /* NOTE(review): this file is Cython-generated output (compiled from
+ * mmdet/ops/nms/src/soft_nms_cpu.pyx via numpy/__init__.pxd). Hand edits
+ * here will be lost on regeneration; change the .pyx/.pxd sources instead. */
+
+ /* Python wrapper for numpy.ndarray.__getbuffer__ (PEP 3118 buffer
+ * protocol entry point). It only sets up the RefNanny debug context
+ * (a no-op unless Cython was built with CYTHON_REFNANNY) and delegates
+ * all work to the implementation function __pyx_pf_5numpy_7ndarray___getbuffer__,
+ * returning its int status unchanged (0 on success, -1 on error). */
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+  int __pyx_r;
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
+  /* Cast self to PyArrayObject* and forward info/flags verbatim. */
+  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+  return __pyx_r;
+}
+
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+ int __pyx_v_i;
+ int __pyx_v_ndim;
+ int __pyx_v_endian_detector;
+ int __pyx_v_little_endian;
+ int __pyx_v_t;
+ char *__pyx_v_f;
+ PyArray_Descr *__pyx_v_descr = 0;
+ int __pyx_v_offset;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyArray_Descr *__pyx_t_7;
+ PyObject *__pyx_t_8 = NULL;
+ char *__pyx_t_9;
+ if (__pyx_v_info == NULL) {
+ PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
+ return -1;
+ }
+ __Pyx_RefNannySetupContext("__getbuffer__", 0);
+ __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(__pyx_v_info->obj);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+ *
+ * cdef int i, ndim
+ * cdef int endian_detector = 1 # <<<<<<<<<<<<<<
+ * cdef bint little_endian = ((&endian_detector)[0] != 0)
+ *
+ */
+ __pyx_v_endian_detector = 1;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+ * cdef int i, ndim
+ * cdef int endian_detector = 1
+ * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
+ *
+ * ndim = PyArray_NDIM(self)
+ */
+ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+ * cdef bint little_endian = ((&endian_detector)[0] != 0)
+ *
+ * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ */
+ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+ * ndim = PyArray_NDIM(self)
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not C contiguous")
+ */
+ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
+ if (__pyx_t_2) {
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L4_bool_binop_done;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"ndarray is not C contiguous")
+ *
+ */
+ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L4_bool_binop_done:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+ * ndim = PyArray_NDIM(self)
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not C contiguous")
+ */
+ if (unlikely(__pyx_t_1)) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 272, __pyx_L1_error)
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+ * ndim = PyArray_NDIM(self)
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not C contiguous")
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ * raise ValueError(u"ndarray is not C contiguous")
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ */
+ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
+ if (__pyx_t_2) {
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L7_bool_binop_done;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ *
+ */
+ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L7_bool_binop_done:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ * raise ValueError(u"ndarray is not C contiguous")
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ */
+ if (unlikely(__pyx_t_1)) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
+ *
+ * info.buf = PyArray_DATA(self)
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 276, __pyx_L1_error)
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ * raise ValueError(u"ndarray is not C contiguous")
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ *
+ * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
+ * info.ndim = ndim
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
+ *
+ * info.buf = PyArray_DATA(self)
+ * info.ndim = ndim # <<<<<<<<<<<<<<
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ * # Allocate new buffer for strides and shape info.
+ */
+ __pyx_v_info->ndim = __pyx_v_ndim;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+ * info.buf = PyArray_DATA(self)
+ * info.ndim = ndim
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
+ * # Allocate new buffer for strides and shape info.
+ * # This is allocated as one block, strides first.
+ */
+ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+ * # Allocate new buffer for strides and shape info.
+ * # This is allocated as one block, strides first.
+ * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<<
+ * info.shape = info.strides + ndim
+ * for i in range(ndim):
+ */
+ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim))));
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
+ * # This is allocated as one block, strides first.
+ * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim)
+ * info.shape = info.strides + ndim # <<<<<<<<<<<<<<
+ * for i in range(ndim):
+ * info.strides[i] = PyArray_STRIDES(self)[i]
+ */
+ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
+ * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim)
+ * info.shape = info.strides + ndim
+ * for i in range(ndim): # <<<<<<<<<<<<<<
+ * info.strides[i] = PyArray_STRIDES(self)[i]
+ * info.shape[i] = PyArray_DIMS(self)[i]
+ */
+ __pyx_t_4 = __pyx_v_ndim;
+ __pyx_t_5 = __pyx_t_4;
+ for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
+ __pyx_v_i = __pyx_t_6;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+ * info.shape = info.strides + ndim
+ * for i in range(ndim):
+ * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
+ * info.shape[i] = PyArray_DIMS(self)[i]
+ * else:
+ */
+ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":287
+ * for i in range(ndim):
+ * info.strides[i] = PyArray_STRIDES(self)[i]
+ * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
+ * else:
+ * info.strides = PyArray_STRIDES(self)
+ */
+ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+ * info.buf = PyArray_DATA(self)
+ * info.ndim = ndim
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
+ * # Allocate new buffer for strides and shape info.
+ * # This is allocated as one block, strides first.
+ */
+ goto __pyx_L9;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+ * info.shape[i] = PyArray_DIMS(self)[i]
+ * else:
+ * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<<
+ * info.shape = PyArray_DIMS(self)
+ * info.suboffsets = NULL
+ */
+ /*else*/ {
+ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
+ * else:
+ * info.strides = PyArray_STRIDES(self)
+ * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<<
+ * info.suboffsets = NULL
+ * info.itemsize = PyArray_ITEMSIZE(self)
+ */
+ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
+ }
+ __pyx_L9:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ * info.strides = PyArray_STRIDES(self)
+ * info.shape = PyArray_DIMS(self)
+ * info.suboffsets = NULL # <<<<<<<<<<<<<<
+ * info.itemsize = PyArray_ITEMSIZE(self)
+ * info.readonly = not PyArray_ISWRITEABLE(self)
+ */
+ __pyx_v_info->suboffsets = NULL;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+ * info.shape = PyArray_DIMS(self)
+ * info.suboffsets = NULL
+ * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
+ * info.readonly = not PyArray_ISWRITEABLE(self)
+ *
+ */
+ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
+ * info.suboffsets = NULL
+ * info.itemsize = PyArray_ITEMSIZE(self)
+ * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
+ *
+ * cdef int t
+ */
+ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":296
+ *
+ * cdef int t
+ * cdef char* f = NULL # <<<<<<<<<<<<<<
+ * cdef dtype descr = PyArray_DESCR(self)
+ * cdef int offset
+ */
+ __pyx_v_f = NULL;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":297
+ * cdef int t
+ * cdef char* f = NULL
+ * cdef dtype descr = PyArray_DESCR(self) # <<<<<<<<<<<<<<
+ * cdef int offset
+ *
+ */
+ __pyx_t_7 = PyArray_DESCR(__pyx_v_self);
+ __pyx_t_3 = ((PyObject *)__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":300
+ * cdef int offset
+ *
+ * info.obj = self # <<<<<<<<<<<<<<
+ *
+ * if not PyDataType_HASFIELDS(descr):
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_v_self));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
+ __Pyx_GOTREF(__pyx_v_info->obj);
+ __Pyx_DECREF(__pyx_v_info->obj);
+ __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":302
+ * info.obj = self
+ *
+ * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or
+ */
+ __pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":303
+ *
+ * if not PyDataType_HASFIELDS(descr):
+ * t = descr.type_num # <<<<<<<<<<<<<<
+ * if ((descr.byteorder == c'>' and little_endian) or
+ * (descr.byteorder == c'<' and not little_endian)):
+ */
+ __pyx_t_4 = __pyx_v_descr->type_num;
+ __pyx_v_t = __pyx_t_4;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304
+ * if not PyDataType_HASFIELDS(descr):
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
+ if (!__pyx_t_2) {
+ goto __pyx_L15_next_or;
+ } else {
+ }
+ __pyx_t_2 = (__pyx_v_little_endian != 0);
+ if (!__pyx_t_2) {
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L14_bool_binop_done;
+ }
+ __pyx_L15_next_or:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":305
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or
+ * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"Non-native byte order not supported")
+ * if t == NPY_BYTE: f = "b"
+ */
+ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
+ if (__pyx_t_2) {
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L14_bool_binop_done;
+ }
+ __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L14_bool_binop_done:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304
+ * if not PyDataType_HASFIELDS(descr):
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ if (unlikely(__pyx_t_1)) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":306
+ * if ((descr.byteorder == c'>' and little_endian) or
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
+ * if t == NPY_BYTE: f = "b"
+ * elif t == NPY_UBYTE: f = "B"
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 306, __pyx_L1_error)
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":304
+ * if not PyDataType_HASFIELDS(descr):
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":307
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
+ * elif t == NPY_UBYTE: f = "B"
+ * elif t == NPY_SHORT: f = "h"
+ */
+ switch (__pyx_v_t) {
+ case NPY_BYTE:
+ __pyx_v_f = ((char *)"b");
+ break;
+ case NPY_UBYTE:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":308
+ * raise ValueError(u"Non-native byte order not supported")
+ * if t == NPY_BYTE: f = "b"
+ * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
+ * elif t == NPY_SHORT: f = "h"
+ * elif t == NPY_USHORT: f = "H"
+ */
+ __pyx_v_f = ((char *)"B");
+ break;
+ case NPY_SHORT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":309
+ * if t == NPY_BYTE: f = "b"
+ * elif t == NPY_UBYTE: f = "B"
+ * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
+ * elif t == NPY_USHORT: f = "H"
+ * elif t == NPY_INT: f = "i"
+ */
+ __pyx_v_f = ((char *)"h");
+ break;
+ case NPY_USHORT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":310
+ * elif t == NPY_UBYTE: f = "B"
+ * elif t == NPY_SHORT: f = "h"
+ * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
+ * elif t == NPY_INT: f = "i"
+ * elif t == NPY_UINT: f = "I"
+ */
+ __pyx_v_f = ((char *)"H");
+ break;
+ case NPY_INT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":311
+ * elif t == NPY_SHORT: f = "h"
+ * elif t == NPY_USHORT: f = "H"
+ * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
+ * elif t == NPY_UINT: f = "I"
+ * elif t == NPY_LONG: f = "l"
+ */
+ __pyx_v_f = ((char *)"i");
+ break;
+ case NPY_UINT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":312
+ * elif t == NPY_USHORT: f = "H"
+ * elif t == NPY_INT: f = "i"
+ * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONG: f = "l"
+ * elif t == NPY_ULONG: f = "L"
+ */
+ __pyx_v_f = ((char *)"I");
+ break;
+ case NPY_LONG:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":313
+ * elif t == NPY_INT: f = "i"
+ * elif t == NPY_UINT: f = "I"
+ * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONG: f = "L"
+ * elif t == NPY_LONGLONG: f = "q"
+ */
+ __pyx_v_f = ((char *)"l");
+ break;
+ case NPY_ULONG:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":314
+ * elif t == NPY_UINT: f = "I"
+ * elif t == NPY_LONG: f = "l"
+ * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGLONG: f = "q"
+ * elif t == NPY_ULONGLONG: f = "Q"
+ */
+ __pyx_v_f = ((char *)"L");
+ break;
+ case NPY_LONGLONG:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":315
+ * elif t == NPY_LONG: f = "l"
+ * elif t == NPY_ULONG: f = "L"
+ * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONGLONG: f = "Q"
+ * elif t == NPY_FLOAT: f = "f"
+ */
+ __pyx_v_f = ((char *)"q");
+ break;
+ case NPY_ULONGLONG:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":316
+ * elif t == NPY_ULONG: f = "L"
+ * elif t == NPY_LONGLONG: f = "q"
+ * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
+ * elif t == NPY_FLOAT: f = "f"
+ * elif t == NPY_DOUBLE: f = "d"
+ */
+ __pyx_v_f = ((char *)"Q");
+ break;
+ case NPY_FLOAT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":317
+ * elif t == NPY_LONGLONG: f = "q"
+ * elif t == NPY_ULONGLONG: f = "Q"
+ * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
+ * elif t == NPY_DOUBLE: f = "d"
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ */
+ __pyx_v_f = ((char *)"f");
+ break;
+ case NPY_DOUBLE:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":318
+ * elif t == NPY_ULONGLONG: f = "Q"
+ * elif t == NPY_FLOAT: f = "f"
+ * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ * elif t == NPY_CFLOAT: f = "Zf"
+ */
+ __pyx_v_f = ((char *)"d");
+ break;
+ case NPY_LONGDOUBLE:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":319
+ * elif t == NPY_FLOAT: f = "f"
+ * elif t == NPY_DOUBLE: f = "d"
+ * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
+ * elif t == NPY_CFLOAT: f = "Zf"
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ */
+ __pyx_v_f = ((char *)"g");
+ break;
+ case NPY_CFLOAT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":320
+ * elif t == NPY_DOUBLE: f = "d"
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ */
+ __pyx_v_f = ((char *)"Zf");
+ break;
+ case NPY_CDOUBLE:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":321
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ * elif t == NPY_CFLOAT: f = "Zf"
+ * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ * elif t == NPY_OBJECT: f = "O"
+ */
+ __pyx_v_f = ((char *)"Zd");
+ break;
+ case NPY_CLONGDOUBLE:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":322
+ * elif t == NPY_CFLOAT: f = "Zf"
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
+ * elif t == NPY_OBJECT: f = "O"
+ * else:
+ */
+ __pyx_v_f = ((char *)"Zg");
+ break;
+ case NPY_OBJECT:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":323
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+ __pyx_v_f = ((char *)"O");
+ break;
+ default:
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":325
+ * elif t == NPY_OBJECT: f = "O"
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
+ * info.format = f
+ * return
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 325, __pyx_L1_error)
+ break;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":326
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ * info.format = f # <<<<<<<<<<<<<<
+ * return
+ * else:
+ */
+ __pyx_v_info->format = __pyx_v_f;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":327
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ * info.format = f
+ * return # <<<<<<<<<<<<<<
+ * else:
+ * info.format = PyObject_Malloc(_buffer_format_string_len)
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":302
+ * info.obj = self
+ *
+ * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":329
+ * return
+ * else:
+ * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
+ * info.format[0] = c'^' # Native data types, manual alignment
+ * offset = 0
+ */
+ /*else*/ {
+ __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF));
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":330
+ * else:
+ * info.format = PyObject_Malloc(_buffer_format_string_len)
+ * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
+ * offset = 0
+ * f = _util_dtypestring(descr, info.format + 1,
+ */
+ (__pyx_v_info->format[0]) = '^';
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":331
+ * info.format = PyObject_Malloc(_buffer_format_string_len)
+ * info.format[0] = c'^' # Native data types, manual alignment
+ * offset = 0 # <<<<<<<<<<<<<<
+ * f = _util_dtypestring(descr, info.format + 1,
+ * info.format + _buffer_format_string_len,
+ */
+ __pyx_v_offset = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":332
+ * info.format[0] = c'^' # Native data types, manual alignment
+ * offset = 0
+ * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
+ * info.format + _buffer_format_string_len,
+ * &offset)
+ */
+ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error)
+ __pyx_v_f = __pyx_t_9;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":335
+ * info.format + _buffer_format_string_len,
+ * &offset)
+ * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ */
+ (__pyx_v_f[0]) = '\x00';
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+ * # experimental exception made for __getbuffer__ and __releasebuffer__
+ * # -- the details of this may change.
+ * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
+ * # This implementation of getbuffer is geared towards Cython
+ * # requirements, and does not yet fulfill the PEP.
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ if (__pyx_v_info->obj != NULL) {
+ __Pyx_GOTREF(__pyx_v_info->obj);
+ __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
+ }
+ goto __pyx_L2;
+ __pyx_L0:;
+ if (__pyx_v_info->obj == Py_None) {
+ __Pyx_GOTREF(__pyx_v_info->obj);
+ __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
+ }
+ __pyx_L2:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_descr);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":337
+ * f[0] = c'\0' # Terminate format string
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
+ * if PyArray_HASFIELDS(self):
+ * PyObject_Free(info.format)
+ */
+
+/* Python wrapper */
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
+ __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__releasebuffer__", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":338
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
+ * PyObject_Free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":339
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ * if PyArray_HASFIELDS(self):
+ * PyObject_Free(info.format) # <<<<<<<<<<<<<<
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ * PyObject_Free(info.strides)
+ */
+ PyObject_Free(__pyx_v_info->format);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":338
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
+ * PyObject_Free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":340
+ * if PyArray_HASFIELDS(self):
+ * PyObject_Free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
+ * PyObject_Free(info.strides)
+ * # info.shape was stored after info.strides in the same block
+ */
+ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":341
+ * PyObject_Free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ * PyObject_Free(info.strides) # <<<<<<<<<<<<<<
+ * # info.shape was stored after info.strides in the same block
+ *
+ */
+ PyObject_Free(__pyx_v_info->strides);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":340
+ * if PyArray_HASFIELDS(self):
+ * PyObject_Free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
+ * PyObject_Free(info.strides)
+ * # info.shape was stored after info.strides in the same block
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":337
+ * f[0] = c'\0' # Terminate format string
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
+ * if PyArray_HASFIELDS(self):
+ * PyObject_Free(info.format)
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+ * ctypedef npy_cdouble complex_t
+ *
+ * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(1, a)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ *
+ * cdef inline object PyArray_MultiIterNew1(a):
+ * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 822, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+ * ctypedef npy_cdouble complex_t
+ *
+ * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(1, a)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
+ * return PyArray_MultiIterNew(1, a)
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 825, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
+ * return PyArray_MultiIterNew(1, a)
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 828, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 831, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ */
+
+/* Cython-generated C — do not hand-edit; regenerate from the .pyx/.pxd source.
+ * Thin wrapper over numpy's C-API PyArray_MultiIterNew with exactly five
+ * array arguments.  Returns a new reference to the multi-iterator object,
+ * or NULL (with a Python exception set and a traceback frame appended in
+ * the error path below) if the C-API call fails. */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<<
+ *
+ * cdef inline tuple PyDataType_SHAPE(dtype d):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ /* Delegate to the numpy C-API; on failure jump to the error label. */
+ __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 834, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ /* Transfer ownership of the new reference to the return slot. */
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ */
+
+ /* function exit code */
+ /* Error path: drop any temporary, record this frame, return NULL (__pyx_r = 0). */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
+ * if PyDataType_HASSUBARRAY(d):
+ * return d.subarray.shape
+ */
+
+/* Cython-generated C — do not hand-edit; regenerate from the .pyx/.pxd source.
+ * Returns the shape tuple of a numpy dtype: d.subarray.shape when the
+ * descriptor has a subarray, otherwise the shared empty tuple.  Always
+ * returns a new reference; this function has no error path (no L1 label),
+ * so it cannot raise. */
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+ *
+ * cdef inline tuple PyDataType_SHAPE(dtype d):
+ * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
+ * return d.subarray.shape
+ * else:
+ */
+ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+ * cdef inline tuple PyDataType_SHAPE(dtype d):
+ * if PyDataType_HASSUBARRAY(d):
+ * return d.subarray.shape # <<<<<<<<<<<<<<
+ * else:
+ * return ()
+ */
+ __Pyx_XDECREF(__pyx_r);
+ /* Return a new reference to the descriptor's stored shape tuple. */
+ __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
+ __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+ *
+ * cdef inline tuple PyDataType_SHAPE(dtype d):
+ * if PyDataType_HASSUBARRAY(d):
+ * return d.subarray.shape
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+ * return d.subarray.shape
+ * else:
+ * return () # <<<<<<<<<<<<<<
+ *
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ /* Non-subarray dtype: return the module-wide cached empty tuple. */
+ __Pyx_INCREF(__pyx_empty_tuple);
+ __pyx_r = __pyx_empty_tuple;
+ goto __pyx_L0;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
+ * if PyDataType_HASSUBARRAY(d):
+ * return d.subarray.shape
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+ * return ()
+ *
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
+ * # Recursive utility function used in __getbuffer__ to get format
+ * # string. The new location in the format string is returned.
+ */
+
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
+ PyArray_Descr *__pyx_v_child = 0;
+ int __pyx_v_endian_detector;
+ int __pyx_v_little_endian;
+ PyObject *__pyx_v_fields = 0;
+ PyObject *__pyx_v_childname = NULL;
+ PyObject *__pyx_v_new_offset = NULL;
+ PyObject *__pyx_v_t = NULL;
+ char *__pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ long __pyx_t_8;
+ char *__pyx_t_9;
+ __Pyx_RefNannySetupContext("_util_dtypestring", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
+ *
+ * cdef dtype child
+ * cdef int endian_detector = 1 # <<<<<<<<<<<<<<
+ * cdef bint little_endian = ((&endian_detector)[0] != 0)
+ * cdef tuple fields
+ */
+ __pyx_v_endian_detector = 1;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
+ * cdef dtype child
+ * cdef int endian_detector = 1
+ * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
+ * cdef tuple fields
+ *
+ */
+ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":851
+ * cdef tuple fields
+ *
+ * for childname in descr.names: # <<<<<<<<<<<<<<
+ * fields = descr.fields[childname]
+ * child, new_offset = fields
+ */
+ if (unlikely(__pyx_v_descr->names == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(1, 851, __pyx_L1_error)
+ }
+ __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+ for (;;) {
+ if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 851, __pyx_L1_error)
+ #else
+ __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ #endif
+ __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
+ *
+ * for childname in descr.names:
+ * fields = descr.fields[childname] # <<<<<<<<<<<<<<
+ * child, new_offset = fields
+ *
+ */
+ if (unlikely(__pyx_v_descr->fields == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 852, __pyx_L1_error)
+ }
+ __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 852, __pyx_L1_error)
+ __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
+ * for childname in descr.names:
+ * fields = descr.fields[childname]
+ * child, new_offset = fields # <<<<<<<<<<<<<<
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15:
+ */
+ if (likely(__pyx_v_fields != Py_None)) {
+ PyObject* sequence = __pyx_v_fields;
+ Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
+ if (unlikely(size != 2)) {
+ if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+ else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+ __PYX_ERR(1, 853, __pyx_L1_error)
+ }
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ #else
+ __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ #endif
+ } else {
+ __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 853, __pyx_L1_error)
+ }
+ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 853, __pyx_L1_error)
+ __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":855
+ * child, new_offset = fields
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 855, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
+ if (unlikely(__pyx_t_6)) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":856
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15:
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
+ *
+ * if ((child.byteorder == c'>' and little_endian) or
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 856, __pyx_L1_error)
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":855
+ * child, new_offset = fields
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":858
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (child.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
+ if (!__pyx_t_7) {
+ goto __pyx_L8_next_or;
+ } else {
+ }
+ __pyx_t_7 = (__pyx_v_little_endian != 0);
+ if (!__pyx_t_7) {
+ } else {
+ __pyx_t_6 = __pyx_t_7;
+ goto __pyx_L7_bool_binop_done;
+ }
+ __pyx_L8_next_or:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":859
+ *
+ * if ((child.byteorder == c'>' and little_endian) or
+ * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"Non-native byte order not supported")
+ * # One could encode it in the format string and have Cython
+ */
+ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
+ if (__pyx_t_7) {
+ } else {
+ __pyx_t_6 = __pyx_t_7;
+ goto __pyx_L7_bool_binop_done;
+ }
+ __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
+ __pyx_t_6 = __pyx_t_7;
+ __pyx_L7_bool_binop_done:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":858
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (child.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ if (unlikely(__pyx_t_6)) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":860
+ * if ((child.byteorder == c'>' and little_endian) or
+ * (child.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
+ * # One could encode it in the format string and have Cython
+ * # complain instead, BUT: < and > in format strings also imply
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 860, __pyx_L1_error)
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":858
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (child.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":870
+ *
+ * # Output padding bytes
+ * while offset[0] < new_offset: # <<<<<<<<<<<<<<
+ * f[0] = 120 # "x"; pad byte
+ * f += 1
+ */
+ while (1) {
+ __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 870, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 870, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 870, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (!__pyx_t_6) break;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":871
+ * # Output padding bytes
+ * while offset[0] < new_offset:
+ * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
+ * f += 1
+ * offset[0] += 1
+ */
+ (__pyx_v_f[0]) = 0x78;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":872
+ * while offset[0] < new_offset:
+ * f[0] = 120 # "x"; pad byte
+ * f += 1 # <<<<<<<<<<<<<<
+ * offset[0] += 1
+ *
+ */
+ __pyx_v_f = (__pyx_v_f + 1);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":873
+ * f[0] = 120 # "x"; pad byte
+ * f += 1
+ * offset[0] += 1 # <<<<<<<<<<<<<<
+ *
+ * offset[0] += child.itemsize
+ */
+ __pyx_t_8 = 0;
+ (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":875
+ * offset[0] += 1
+ *
+ * offset[0] += child.itemsize # <<<<<<<<<<<<<<
+ *
+ * if not PyDataType_HASFIELDS(child):
+ */
+ __pyx_t_8 = 0;
+ (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":877
+ * offset[0] += child.itemsize
+ *
+ * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
+ * t = child.type_num
+ * if end - f < 5:
+ */
+ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
+ if (__pyx_t_6) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":878
+ *
+ * if not PyDataType_HASFIELDS(child):
+ * t = child.type_num # <<<<<<<<<<<<<<
+ * if end - f < 5:
+ * raise RuntimeError(u"Format string allocated too short.")
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 878, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":879
+ * if not PyDataType_HASFIELDS(child):
+ * t = child.type_num
+ * if end - f < 5: # <<<<<<<<<<<<<<
+ * raise RuntimeError(u"Format string allocated too short.")
+ *
+ */
+ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
+ if (unlikely(__pyx_t_6)) {
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":880
+ * t = child.type_num
+ * if end - f < 5:
+ * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
+ *
+ * # Until ticket #99 is fixed, use integers to avoid warnings
+ */
+ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 880, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __PYX_ERR(1, 880, __pyx_L1_error)
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":879
+ * if not PyDataType_HASFIELDS(child):
+ * t = child.type_num
+ * if end - f < 5: # <<<<<<<<<<<<<<
+ * raise RuntimeError(u"Format string allocated too short.")
+ *
+ */
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":883
+ *
+ * # Until ticket #99 is fixed, use integers to avoid warnings
+ * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
+ * elif t == NPY_UBYTE: f[0] = 66 #"B"
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 98;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":884
+ * # Until ticket #99 is fixed, use integers to avoid warnings
+ * if t == NPY_BYTE: f[0] = 98 #"b"
+ * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 66;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":885
+ * if t == NPY_BYTE: f[0] = 98 #"b"
+ * elif t == NPY_UBYTE: f[0] = 66 #"B"
+ * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x68;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":886
+ * elif t == NPY_UBYTE: f[0] = 66 #"B"
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 72;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":887
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x69;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":888
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 73;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":889
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x6C;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":890
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 76;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":891
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x71;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":892
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 81;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":893
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x66;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":894
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x64;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":895
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 0x67;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":896
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 90;
+ (__pyx_v_f[1]) = 0x66;
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":897
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ * elif t == NPY_OBJECT: f[0] = 79 #"O"
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 90;
+ (__pyx_v_f[1]) = 0x64;
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":898
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
+ * elif t == NPY_OBJECT: f[0] = 79 #"O"
+ * else:
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 90;
+ (__pyx_v_f[1]) = 0x67;
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":899
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 899, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 899, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 899, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (likely(__pyx_t_6)) {
+ (__pyx_v_f[0]) = 79;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":901
+ * elif t == NPY_OBJECT: f[0] = 79 #"O"
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
+ * f += 1
+ * else:
+ */
+ /*else*/ {
+ __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 901, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 901, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __PYX_ERR(1, 901, __pyx_L1_error)
+ }
+ __pyx_L15:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":902
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ * f += 1 # <<<<<<<<<<<<<<
+ * else:
+ * # Cython ignores struct boundary information ("T{...}"),
+ */
+ __pyx_v_f = (__pyx_v_f + 1);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":877
+ * offset[0] += child.itemsize
+ *
+ * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
+ * t = child.type_num
+ * if end - f < 5:
+ */
+ goto __pyx_L13;
+ }
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":906
+ * # Cython ignores struct boundary information ("T{...}"),
+ * # so don't output it
+ * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
+ * return f
+ *
+ */
+ /*else*/ {
+ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 906, __pyx_L1_error)
+ __pyx_v_f = __pyx_t_9;
+ }
+ __pyx_L13:;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":851
+ * cdef tuple fields
+ *
+ * for childname in descr.names: # <<<<<<<<<<<<<<
+ * fields = descr.fields[childname]
+ * child, new_offset = fields
+ */
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":907
+ * # so don't output it
+ * f = _util_dtypestring(child, f, end, offset)
+ * return f # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_v_f;
+ goto __pyx_L0;
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+ * return ()
+ *
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
+ * # Recursive utility function used in __getbuffer__ to get format
+ * # string. The new location in the format string is returned.
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_child);
+ __Pyx_XDECREF(__pyx_v_fields);
+ __Pyx_XDECREF(__pyx_v_childname);
+ __Pyx_XDECREF(__pyx_v_new_offset);
+ __Pyx_XDECREF(__pyx_v_t);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1022
+ * int _import_umath() except -1
+ *
+ * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
+ * Py_INCREF(base) # important to do this before stealing the reference below!
+ * PyArray_SetBaseObject(arr, base)
+ */
+
+/* NOTE(review): Cython-generated wrapper for numpy's set_array_base().
+ * Py_INCREF(base) must run before PyArray_SetBaseObject, which steals a
+ * reference to base (see the echoed .pxd comment below); the call's return
+ * value is deliberately discarded via the (void) cast. Do not hand-edit this
+ * file — regenerate it from the .pyx source instead. */
+static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
+  __Pyx_RefNannyDeclarations
+  __Pyx_RefNannySetupContext("set_array_base", 0);
+
+  /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1023
+ *
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *     Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
+ *     PyArray_SetBaseObject(arr, base)
+ *
+ */
+  Py_INCREF(__pyx_v_base);
+
+  /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1024
+ * cdef inline void set_array_base(ndarray arr, object base):
+ *     Py_INCREF(base) # important to do this before stealing the reference below!
+ *     PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object get_array_base(ndarray arr):
+ */
+  (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
+
+  /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1022
+ *     int _import_umath() except -1
+ *
+ * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
+ *     Py_INCREF(base) # important to do this before stealing the reference below!
+ *     PyArray_SetBaseObject(arr, base)
+ */
+
+  /* function exit code */
+  __Pyx_RefNannyFinishContext();
+}
+
+/* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1026
+ * PyArray_SetBaseObject(arr, base)
+ *
+ * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
+ * base = PyArray_BASE(arr)
+ * if base is NULL:
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
+ PyObject *__pyx_v_base;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("get_array_base", 0);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1027
+ *
+ * cdef inline object get_array_base(ndarray arr):
+ * base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
+ * if base is NULL:
+ * return None
+ */
+ __pyx_v_base = PyArray_BASE(__pyx_v_arr);
+
+ /* "../../../../../../home/vasgaoweithu/anaconda3/lib/python3.7/site-packages/Cython/Includes/numpy/__init__.pxd":1028
+ * cdef inline object get_array_base(ndarray arr):
+ * base = PyArray_BASE(arr)
+ * if base is NULL: # <<<<<<<<<<<<<<
+ * return None
+ * return