From 5a004548b112aaf9757bc0311d1003a67f59ad67 Mon Sep 17 00:00:00 2001 From: "junjie.huang" Date: Mon, 13 Nov 2023 16:51:51 +0800 Subject: [PATCH] init commit for dev3.0 to support dal --- README.md | 26 +- configs/bevdet/bevdet-r50-4d-cbgs.py | 17 +- configs/bevdet/bevdet-r50-4d-depth-cbgs.py | 17 +- configs/bevdet/bevdet-r50-4d-stereo-cbgs.py | 17 +- configs/bevdet/bevdet-r50-4dlongterm-cbgs.py | 17 +- .../bevdet-r50-4dlongterm-depth-cbgs.py | 17 +- .../bevdet-r50-4dlongterm-stereo-cbgs.py | 17 +- configs/bevdet/bevdet-r50-cbgs.py | 17 +- configs/bevdet/bevdet-r50.py | 17 +- .../bevdet-stbase-4d-stereo-512x1408-cbgs.py | 17 +- .../bevdet-occ-r50-4d-stereo-24e.py | 20 +- .../bevdet-occ-r50-4d-stereo-24e_384704.py | 20 +- ...et-occ-r50-4dlongterm-stereo-24e_384704.py | 20 +- ...evdet-occ-stbase-4d-stereo-512x1408-24e.py | 20 +- configs/dal/dal-base.py | 381 ++++++++ configs/dal/dal-large.py | 384 +++++++++ configs/dal/dal-tiny.py | 379 ++++++++ docs/en/news.md | 6 +- mmdet3d/core/__init__.py | 1 + mmdet3d/core/bbox/assigners/__init__.py | 5 +- .../bbox/assigners/hungarian_assigner_3d.py | 148 ++++ mmdet3d/core/bbox/coders/__init__.py | 3 +- .../bbox/coders/transfusion_bbox_coder.py | 124 +++ mmdet3d/core/two_stage_runner.py | 82 ++ mmdet3d/datasets/pipelines/__init__.py | 10 +- mmdet3d/datasets/pipelines/loading.py | 210 ++++- mmdet3d/datasets/pipelines/transforms_3d.py | 216 ++++- mmdet3d/models/backbones/second.py | 9 +- mmdet3d/models/dense_heads/__init__.py | 2 + mmdet3d/models/dense_heads/dal_head.py | 258 ++++++ .../models/dense_heads/transfusion_head.py | 813 ++++++++++++++++++ mmdet3d/models/detectors/__init__.py | 1 + mmdet3d/models/detectors/bevdet.py | 21 +- mmdet3d/models/detectors/dal.py | 155 ++++ mmdet3d/models/necks/view_transformer.py | 50 +- mmdet3d/models/utils/__init__.py | 4 +- mmdet3d/models/utils/grid_mask.py | 127 +++ mmdet3d/models/utils/spconv_voxelize.py | 72 ++ mmdet3d/models/utils/transformer.py | 578 +++++++++++++ resources/nds-fps-dal.png | Bin 0 -> 77085 bytes tools/analysis_tools/vis.py | 2 +- tools/create_data_bevdet.py | 19 +- tools/data_converter/create_gt_database.py | 479 ++--------- tools/test.py | 2 + tools/train.py | 14 + 45 files changed, 4211 insertions(+), 603 deletions(-) create mode 100644 configs/dal/dal-base.py create mode 100644 configs/dal/dal-large.py create mode 100644 configs/dal/dal-tiny.py create mode 100644 mmdet3d/core/bbox/assigners/hungarian_assigner_3d.py create mode 100644 mmdet3d/core/bbox/coders/transfusion_bbox_coder.py create mode 100644 mmdet3d/core/two_stage_runner.py create mode 100644 mmdet3d/models/dense_heads/dal_head.py create mode 100644 mmdet3d/models/dense_heads/transfusion_head.py create mode 100644 mmdet3d/models/detectors/dal.py create mode 100644 mmdet3d/models/utils/grid_mask.py create mode 100644 mmdet3d/models/utils/spconv_voxelize.py create mode 100644 mmdet3d/models/utils/transformer.py create mode 100644 resources/nds-fps-dal.png diff --git a/README.md b/README.md index 3278b3ba..c0124ebd 100644 --- a/README.md +++ b/README.md @@ -1,24 +1,12 @@ # BEVDet -![Illustrating the performance of the proposed BEVDet on the nuScenes val set](./resources/nds-fps.png) + +![](./resources/nds-fps-dal.png) + ## News -- **2023.05.07** Improve the occpancy baseline by enlarging the input size and using long-term temporal fusion. -- **2023.04.28** Support task of [occupancy prediction](https://github.com/CVPR2023-3D-Occupancy-Prediction/CVPR2023-3D-Occupancy-Prediction) . 
-- **2023.04.27** Equip BEVDet with stereo depth estimation. -- **2023.04.10** Use single head for multi-class prediction. -- **2023.01.12** Support TensorRT-INT8. -- **2022.11.24** A new branch of bevdet codebase, dubbed dev2.0, is released. dev2.0 includes the following features: - -1. support **BEVPoolv2**, whose inference speed is up to **15.1 times** the previous fastest implementation of Lift-Splat-Shoot view transformer. It is also far less memory consumption. - ![bevpoolv2](./resources/bevpoolv2.png) - ![bevpoolv2](./resources/bevpoolv2_performance.png) -2. use the origin of ego coordinate system as the center of the receptive field instead of the Lidar's. -3. **support conversion of BEVDet from pytorch to TensorRT.** -4. use the long term temporal fusion as SOLOFusion. -5. train models without CBGS by default. -6. use key frame for temporal fusion. -7. Technique Report [BEVPoolv2](https://arxiv.org/abs/2211.17111) in English and [Blog](https://zhuanlan.zhihu.com/p/586637783) in Chinese. +- **2023.11.08** Support DAL for 3D object detection with LiDAR-camera fusion. + - [History](./docs/en/news.md) @@ -35,7 +23,9 @@ | [**BEVDet-R50-4DLongterm-Depth-CBGS**](configs/bevdet/bevdet-r50-4d-depth-cbgs.py) | 39.4/39.9# | 51.5/51.9# |38.4/4.0/42.4 |23.6 | [baidu](https://pan.baidu.com/s/1237QyV18zvRJ1pU3YzRItw?pwd=npe1) | [baidu](https://pan.baidu.com/s/1237QyV18zvRJ1pU3YzRItw?pwd=npe1) | | [**BEVDet-R50-4DLongterm-Stereo-CBGS**](configs/bevdet/bevdet-r50-4dlongterm-stereo-cbgs.py) | 41.1/41.5# | 52.3/52.7# |- |- | [baidu](https://pan.baidu.com/s/1237QyV18zvRJ1pU3YzRItw?pwd=npe1) | [baidu](https://pan.baidu.com/s/1237QyV18zvRJ1pU3YzRItw?pwd=npe1) | | [**BEVDet-STBase-4D-Stereo-512x1408-CBGS**](configs/bevdet/bevdet-stbase-4d-stereo-512x1408-cbgs.py) | 47.2# | 57.6# |- |- | [baidu](https://pan.baidu.com/s/1237QyV18zvRJ1pU3YzRItw?pwd=npe1) | [baidu](https://pan.baidu.com/s/1237QyV18zvRJ1pU3YzRItw?pwd=npe1) | - +| [**DAL-Tiny**](configs/dal/dal-tiny.py) | 67.4# | 71.3# |- |16.6 | [baidu](https://pan.baidu.com/s/15rmJL_SWUeQEXG9dYYl8gA?pwd=36g5) | [baidu](https://pan.baidu.com/s/15rmJL_SWUeQEXG9dYYl8gA?pwd=36g5) | +| [**DAL-Base**](configs/dal/dal-base.py) | 70.0# | 73.4# |- |10.7 | [baidu](https://pan.baidu.com/s/15rmJL_SWUeQEXG9dYYl8gA?pwd=36g5) | [baidu](https://pan.baidu.com/s/15rmJL_SWUeQEXG9dYYl8gA?pwd=36g5) | +| [**DAL-Large**](configs/dal/dal-large.py) | 71.5# | 74.0# |- |6.1 | [baidu](https://pan.baidu.com/s/15rmJL_SWUeQEXG9dYYl8gA?pwd=36g5) | [baidu](https://pan.baidu.com/s/15rmJL_SWUeQEXG9dYYl8gA?pwd=36g5) | \# align previous frame bev feature during the view transformation. Depth: Depth supervised from Lidar as BEVDepth. 
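All of the BEVDet and BEVDet-Occ config diffs below apply the same two mechanical changes: the combined `LoadAnnotationsBEVDepth` step is split into a plain `LoadAnnotations` step followed by a `BEVAug` step that now carries the BEV-space augmentation arguments (`bda_aug_conf`, `classes`, `is_train`), and the info files are renamed from `bevdetv2-nuscenes_*` to `bevdetv3-nuscenes_*`. A minimal sketch of the recurring pipeline pattern (illustrative only; `data_config`, `bda_aug_conf` and `class_names` are defined in each config, and the remaining steps are unchanged):

```python
# dev2.0 style: annotation loading and BEV augmentation fused in one step.
train_pipeline_old = [
    dict(type='PrepareImageInputs', is_train=True, data_config=data_config),
    dict(type='LoadAnnotationsBEVDepth',
         bda_aug_conf=bda_aug_conf, classes=class_names),
    # ... remaining steps ...
]

# dev3.0 style: loading and BEV-space augmentation are separate steps.
train_pipeline_new = [
    dict(type='PrepareImageInputs', is_train=True, data_config=data_config),
    dict(type='LoadAnnotations'),
    dict(type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names),
    # ... remaining steps ...
]
```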
diff --git a/configs/bevdet/bevdet-r50-4d-cbgs.py b/configs/bevdet/bevdet-r50-4d-cbgs.py index ac3ad2d6..a463c096 100644 --- a/configs/bevdet/bevdet-r50-4d-cbgs.py +++ b/configs/bevdet/bevdet-r50-4d-cbgs.py @@ -213,8 +213,9 @@ is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), @@ -226,11 +227,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -268,7 +269,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, @@ -277,7 +278,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50-4d-depth-cbgs.py b/configs/bevdet/bevdet-r50-4d-depth-cbgs.py index a9e20405..5aa3ee82 100644 --- a/configs/bevdet/bevdet-r50-4d-depth-cbgs.py +++ b/configs/bevdet/bevdet-r50-4d-depth-cbgs.py @@ -212,8 +212,9 @@ is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict( @@ -233,11 +234,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -275,7 +276,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, @@ -284,7 +285,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50-4d-stereo-cbgs.py b/configs/bevdet/bevdet-r50-4d-stereo-cbgs.py index 3dc7745e..88f49588 100644 --- a/configs/bevdet/bevdet-r50-4d-stereo-cbgs.py +++ b/configs/bevdet/bevdet-r50-4d-stereo-cbgs.py @@ -220,8 +220,9 @@ is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict( @@ -241,11 +242,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', 
coord_type='LIDAR', @@ -284,7 +285,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, @@ -293,7 +294,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50-4dlongterm-cbgs.py b/configs/bevdet/bevdet-r50-4dlongterm-cbgs.py index 1cc13ae0..0ac24881 100644 --- a/configs/bevdet/bevdet-r50-4dlongterm-cbgs.py +++ b/configs/bevdet/bevdet-r50-4dlongterm-cbgs.py @@ -211,8 +211,9 @@ is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), @@ -224,11 +225,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -266,7 +267,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, @@ -275,7 +276,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50-4dlongterm-depth-cbgs.py b/configs/bevdet/bevdet-r50-4dlongterm-depth-cbgs.py index a7e017fa..040fe735 100644 --- a/configs/bevdet/bevdet-r50-4dlongterm-depth-cbgs.py +++ b/configs/bevdet/bevdet-r50-4dlongterm-depth-cbgs.py @@ -212,8 +212,9 @@ is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict( @@ -233,11 +234,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -275,7 +276,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, @@ -284,7 +285,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50-4dlongterm-stereo-cbgs.py b/configs/bevdet/bevdet-r50-4dlongterm-stereo-cbgs.py index da304b8c..77e9e409 100644 --- a/configs/bevdet/bevdet-r50-4dlongterm-stereo-cbgs.py +++ b/configs/bevdet/bevdet-r50-4dlongterm-stereo-cbgs.py @@ -220,8 +220,9 @@ 
is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict( @@ -241,11 +242,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -284,7 +285,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=4, @@ -293,7 +294,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50-cbgs.py b/configs/bevdet/bevdet-r50-cbgs.py index 96a66e4b..c00e1ce3 100644 --- a/configs/bevdet/bevdet-r50-cbgs.py +++ b/configs/bevdet/bevdet-r50-cbgs.py @@ -176,8 +176,9 @@ type='PrepareImageInputs', is_train=True, data_config=data_config), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), @@ -189,11 +190,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -230,7 +231,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, @@ -239,7 +240,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-r50.py b/configs/bevdet/bevdet-r50.py index 3bf564bb..ea3bf8b5 100644 --- a/configs/bevdet/bevdet-r50.py +++ b/configs/bevdet/bevdet-r50.py @@ -175,8 +175,9 @@ type='PrepareImageInputs', is_train=True, data_config=data_config), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), @@ -188,11 +189,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -229,14 +230,14 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict( 
data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet/bevdet-stbase-4d-stereo-512x1408-cbgs.py b/configs/bevdet/bevdet-stbase-4d-stereo-512x1408-cbgs.py index a1c9b7af..51f526be 100644 --- a/configs/bevdet/bevdet-stbase-4d-stereo-512x1408-cbgs.py +++ b/configs/bevdet/bevdet-stbase-4d-stereo-512x1408-cbgs.py @@ -203,8 +203,9 @@ is_train=True, data_config=data_config, sequential=True), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, classes=class_names), dict( @@ -224,11 +225,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -267,7 +268,7 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=2, # with 32 GPU @@ -276,7 +277,7 @@ type='CBGSDataset', dataset=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e.py b/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e.py index 6ec2e6ca..c9c38ffa 100644 --- a/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e.py +++ b/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e.py @@ -141,11 +141,11 @@ data_config=data_config, sequential=True), dict(type='LoadOccGTFromFile'), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=True), + classes=class_names), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -161,11 +161,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -205,14 +205,14 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=4, workers_per_gpu=4, train=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e_384704.py b/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e_384704.py index bd9a98f1..95ce2530 100644 --- a/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e_384704.py +++ b/configs/bevdet_occ/bevdet-occ-r50-4d-stereo-24e_384704.py @@ -247,11 +247,11 @@ data_config=data_config, sequential=True), dict(type='LoadOccGTFromFile'), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', 
bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=True), + classes=class_names), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -267,11 +267,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -311,14 +311,14 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=4, workers_per_gpu=4, train=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet_occ/bevdet-occ-r50-4dlongterm-stereo-24e_384704.py b/configs/bevdet_occ/bevdet-occ-r50-4dlongterm-stereo-24e_384704.py index cd029452..c404c9b1 100644 --- a/configs/bevdet_occ/bevdet-occ-r50-4dlongterm-stereo-24e_384704.py +++ b/configs/bevdet_occ/bevdet-occ-r50-4dlongterm-stereo-24e_384704.py @@ -143,11 +143,11 @@ data_config=data_config, sequential=True), dict(type='LoadOccGTFromFile'), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=True), + classes=class_names), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -163,11 +163,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -207,14 +207,14 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=4, workers_per_gpu=4, train=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/bevdet_occ/bevdet-occ-stbase-4d-stereo-512x1408-24e.py b/configs/bevdet_occ/bevdet-occ-stbase-4d-stereo-512x1408-24e.py index 1a8ff952..56627b9b 100644 --- a/configs/bevdet_occ/bevdet-occ-stbase-4d-stereo-512x1408-24e.py +++ b/configs/bevdet_occ/bevdet-occ-stbase-4d-stereo-512x1408-24e.py @@ -199,11 +199,11 @@ data_config=data_config, sequential=True), dict(type='LoadOccGTFromFile'), + dict(type='LoadAnnotations'), dict( - type='LoadAnnotationsBEVDepth', + type='BEVAug', bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=True), + classes=class_names), dict( type='LoadPointsFromFile', coord_type='LIDAR', @@ -219,11 +219,11 @@ test_pipeline = [ dict(type='PrepareImageInputs', data_config=data_config, sequential=True), - dict( - type='LoadAnnotationsBEVDepth', - bda_aug_conf=bda_aug_conf, - classes=class_names, - is_train=False), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), dict( type='LoadPointsFromFile', 
coord_type='LIDAR', @@ -263,14 +263,14 @@ test_data_config = dict( pipeline=test_pipeline, - ann_file=data_root + 'bevdetv2-nuscenes_infos_val.pkl') + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl') data = dict( samples_per_gpu=2, # with 32 GPU workers_per_gpu=4, train=dict( data_root=data_root, - ann_file=data_root + 'bevdetv2-nuscenes_infos_train.pkl', + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', pipeline=train_pipeline, classes=class_names, test_mode=False, diff --git a/configs/dal/dal-base.py b/configs/dal/dal-base.py new file mode 100644 index 00000000..ed16d504 --- /dev/null +++ b/configs/dal/dal-base.py @@ -0,0 +1,381 @@ +_base_ = [ + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/cyclic_20e.py', + '../_base_/default_runtime.py' +] +''' + +''' +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-54.0, -54.0, -3.0, 54.0, 54.0, 5.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'] + +data_config = { + 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', + 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], + 'Ncams': 5, + 'input_size': (256, 704), + 'src_size': (900, 1600), + + # Augmentation + 'resize': (-0.06, 0.44), + 'rot': (-5.4, 5.4), + 'flip': True, + 'crop_h': (0.0, 0.0), + 'random_crop_height': True, + 'vflip':True, + 'resize_test': 0.04, + + 'pmd': dict( + brightness_delta=32, + contrast_lower=0.5, + contrast_upper=1.5, + saturation_lower=0.5, + saturation_upper=1.5, + hue_delta=18, + rate=0.5 + ) +} + +grid_config = { + 'x': [-54.0, 54.0, 0.6], + 'y': [-54.0, 54.0, 0.6], + 'z': [-3, 5, 8], + 'depth': [1.0, 60.0, 0.5], +} + + +# Model +voxel_size = [0.075, 0.075, 0.2] + +feat_bev_img_dim = 32 +img_feat_dim = 128 +model = dict( + type='DAL', + use_grid_mask=True, + # camera + img_backbone=dict( + pretrained='torchvision://resnet18', + type='ResNet', + depth=18, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + style='pytorch'), + img_neck=dict( + type='CustomFPN', + in_channels=[128, 256, 512], + out_channels=img_feat_dim, + num_outs=1, + start_level=0, + out_ids=[0]), + img_view_transformer=dict( + type='LSSViewTransformer', + grid_config=grid_config, + input_size=data_config['input_size'], + in_channels=img_feat_dim, + out_channels=feat_bev_img_dim, + downsample=8, + with_depth_from_lidar=True), + + # lidar + pts_voxel_layer=dict( + max_num_points=10, voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + max_voxels=(120000, 160000)), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + base_channels=24, + sparse_shape=[41, 1440, 1440], + output_channels=192, + order=('conv', 'norm', 'act'), + encoder_channels=((24, 24, 48), + (48, 48, 96), + (96, 96, 192), + (192, 192)), + encoder_paddings=((0, 0, 1), + (0, 0, 1), + (0, 0, [0, 1, 1]), + (0, 0)), + block_type='basicblock'), + + pts_backbone=dict( + type='SECOND', + in_channels=384, + out_channels=[192, 384], + layer_nums=[8, 8], + layer_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[192, 384], + out_channels=[256, 256], + upsample_strides=[1, 2], + norm_cfg=dict(type='BN', 
eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + + # head + pts_bbox_head=dict( + type='DALHead', + + # DAL + feat_bev_img_dim=feat_bev_img_dim, + img_feat_dim=img_feat_dim, + sparse_fuse_layers=2, + dense_fuse_layers=2, + instance_attn=False, + + # Transfusion + num_proposals=300, + in_channels=512, + hidden_channel=128, + num_classes=10, + num_decoder_layers=1, + num_heads=8, + nms_kernel_size=3, + ffn_channel=256, + dropout=0.1, + bn_momentum=0.1, + activation='relu', + auxiliary=True, + common_heads=dict( + center=[2, 2], + height=[1, 2], + dim=[3, 2], + rot=[2, 2], + vel=[2, 2]), + bbox_coder=dict( + type='TransFusionBBoxCoder', + pc_range=point_cloud_range[:2], + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + score_threshold=0.0, + out_size_factor=8, + voxel_size=voxel_size[:2], + code_size=10), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0), + loss_heatmap=dict( + type='GaussianFocalLoss', reduction='mean', loss_weight=1.0), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25)), + train_cfg=dict(pts=dict( + dataset='nuScenes', + point_cloud_range=point_cloud_range, + grid_size=[1440, 1440, 40], + voxel_size=voxel_size, + out_size_factor=8, + gaussian_overlap=0.1, + min_radius=2, + pos_weight=-1, + code_weights=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], + assigner=dict( + type='HungarianAssigner3D', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + cls_cost=dict( + type='FocalLossCost', + gamma=2.0, + alpha=0.25, + weight=0.15), + reg_cost=dict(type='BBoxBEVL1Cost', weight=0.25), + iou_cost=dict(type='IoU3DCost', weight=0.25)))), + test_cfg=dict(pts=dict( + dataset='nuScenes', + grid_size=[1440, 1440, 40], + img_feat_downsample=8, + out_size_factor=8, + voxel_size=voxel_size[:2], + pc_range=point_cloud_range[:2], + nms_type=None)), +) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +file_client_args = dict(backend='disk') + +bda_aug_conf = dict( + rot_lim=(-22.5 * 2, 22.5 * 2), + scale_lim=(0.9, 1.1), + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + tran_lim=[0.5, 0.5, 0.5] +) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'bevdetv3-nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + +train_pipeline = [ + dict( + type='PrepareImageInputs', + is_train=True, opencv_pp=True, + data_config=data_config), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args, + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + dict(type='LoadAnnotations'), + dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='VelocityAug'), + dict( + type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names), + 
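+    # PointToMultiViewDepthFusion is new in this branch; it presumably
+    # rasterises the aggregated lidar sweeps into a per-camera depth map
+    # collected as 'gt_depth'. With downsample=1 the map keeps the full
+    # input resolution, and the view transformer above
+    # (with_depth_from_lidar=True) consumes it directly instead of relying
+    # only on predicted depth.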
dict(type='PointToMultiViewDepthFusion', downsample=1, + grid_config=grid_config), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d', + 'img_inputs', 'gt_depth', + 'gt_bboxes_ignore' + ]) +] + +test_pipeline = [ + dict( + type='PrepareImageInputs', + is_train=False, opencv_pp=True, + data_config=data_config), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args, + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), + dict(type='PointToMultiViewDepthFusion', downsample=1, + grid_config=grid_config), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points', 'img_inputs', 'gt_depth']) + ]) +] + +input_modality = dict( + use_lidar=True, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) + +data = dict( + samples_per_gpu=4, # for 16 GPU + workers_per_gpu=6, + train=dict( + type='CBGSDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'bevdetv3-nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + use_valid_flag=True, + modality=input_modality, + img_info_prototype='bevdet', + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR')), + val=dict(pipeline=test_pipeline, classes=class_names, + modality=input_modality, + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl', + img_info_prototype='bevdet'), + test=dict(pipeline=test_pipeline, classes=class_names, + modality=input_modality, + ann_file=data_root + 'bevdetv3-nuscenes_infos_val.pkl', + img_info_prototype='bevdet')) + +evaluation = dict(interval=20, pipeline=test_pipeline) +optimizer = dict(type='AdamW', lr=2e-4, weight_decay=0.01) # for 64 total batch size +two_stage = True +runner = dict(type='TwoStageRunner', max_epochs=20) diff --git a/configs/dal/dal-large.py b/configs/dal/dal-large.py new file mode 100644 index 00000000..7c2ca4b9 --- /dev/null +++ b/configs/dal/dal-large.py @@ -0,0 +1,384 @@ +_base_ = [ + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/cyclic_20e.py', + '../_base_/default_runtime.py' +] +''' + +''' +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -3.0, 51.2, 51.2, 5.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'] + +data_config = { + 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', + 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], + 'Ncams': 5, + 'input_size': (384, 1056), + 'src_size': (900, 1600), + + # Augmentation + 'resize': (-0.06, 0.44), + 'rot': (-5.4, 5.4), + 'flip': True, + 'crop_h': (0.00, 0.00), + 'random_crop_height': True, + 'vflip':True, + 'resize_test': 0.08, + + 'pmd': dict( + brightness_delta=32, + contrast_lower=0.5, + contrast_upper=1.5, + saturation_lower=0.5, + saturation_upper=1.5, + hue_delta=18, + rate=0.5 + ) +} + +grid_config = { + 'x': [-51.2, 51.2, 0.4], + 'y': [-51.2, 51.2, 0.4], + 'z': [-3, 5, 8], + 'depth': [1.0, 60.0, 0.5], +} + +# Model +voxel_size = [0.05, 0.05, 0.2] + +feat_bev_img_dim = 32 +img_feat_dim = 128 +model = dict( + type='DAL', + use_grid_mask=True, + # camera + img_backbone=dict( + pretrained='torchvision://resnet50', + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=True, + style='pytorch'), + img_neck=dict( + type='CustomFPN', + in_channels=[512, 1024, 2048], + out_channels=img_feat_dim, + num_outs=1, + start_level=0, + out_ids=[0]), + img_view_transformer=dict( + type='LSSViewTransformer', + grid_config=grid_config, + input_size=data_config['input_size'], + in_channels=img_feat_dim, + out_channels=feat_bev_img_dim, + downsample=8, + with_depth_from_lidar=True), + + # lidar + pts_voxel_layer=dict( + max_num_points=10, voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + max_voxels=(120000, 160000)), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + base_channels=32, + sparse_shape=[41, 2048, 2048], + output_channels=256, + order=('conv', 'norm', 'act'), + encoder_channels=((32, 32, 64), + (64, 64, 128), + (128, 128, 256), + (256, 256)), + encoder_paddings=((0, 0, 1), + (0, 0, 1), + (0, 0, [0, 1, 1]), + (0, 0)), + block_type='basicblock'), + + pts_backbone=dict( + type='SECOND', + in_channels=512, + out_channels=[128, 256, 256], + layer_nums=[3, 3, 3], + layer_strides=[1, 2, 2], + with_cp=True, + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + 
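+    # SECONDFPN upsamples the three SECOND stages to a common stride and
+    # concatenates them (3 x 128 = 384 channels), matching the DALHead
+    # in_channels=384 below; dal-base instead concatenates two 256-channel
+    # levels into 512.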
pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256, 256], + out_channels=[128, 128, 128], + upsample_strides=[1, 2, 4], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + + # head + pts_bbox_head=dict( + type='DALHead', + + # DAL + feat_bev_img_dim=feat_bev_img_dim, + img_feat_dim=img_feat_dim, + sparse_fuse_layers=2, + dense_fuse_layers=2, + instance_attn=False, + + # Transfusion + num_proposals=200, + in_channels=384, + hidden_channel=128, + num_classes=10, + num_decoder_layers=1, + num_heads=8, + nms_kernel_size=3, + ffn_channel=256, + dropout=0.1, + bn_momentum=0.1, + activation='relu', + auxiliary=True, + common_heads=dict( + center=[2, 2], + height=[1, 2], + dim=[3, 2], + rot=[2, 2], + vel=[2, 2]), + bbox_coder=dict( + type='TransFusionBBoxCoder', + pc_range=point_cloud_range[:2], + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + score_threshold=0.0, + out_size_factor=8, + voxel_size=voxel_size[:2], + code_size=10), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0), + loss_heatmap=dict( + type='GaussianFocalLoss', reduction='mean', loss_weight=1.0), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25)), + train_cfg=dict(pts=dict( + dataset='nuScenes', + point_cloud_range=point_cloud_range, + grid_size=[2048, 2048, 40], + voxel_size=voxel_size, + out_size_factor=8, + gaussian_overlap=0.1, + min_radius=2, + pos_weight=-1, + code_weights=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], + assigner=dict( + type='HungarianAssigner3D', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + cls_cost=dict( + type='FocalLossCost', + gamma=2.0, + alpha=0.25, + weight=0.15), + reg_cost=dict(type='BBoxBEVL1Cost', weight=0.25), + iou_cost=dict(type='IoU3DCost', weight=0.25)))), + test_cfg=dict(pts=dict( + dataset='nuScenes', + grid_size=[2048, 2048, 40], + img_feat_downsample=8, + out_size_factor=8, + voxel_size=voxel_size[:2], + pc_range=point_cloud_range[:2], + nms_type=None)), +) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +file_client_args = dict(backend='disk') + +bda_aug_conf = dict( + rot_lim=(-22.5 * 2, 22.5 * 2), + scale_lim=(0.9, 1.1), + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + tran_lim=[0.5, 0.5, 0.5] +) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'bevdetv5-nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + +train_pipeline = [ + dict( + type='PrepareImageInputs', + is_train=True, opencv_pp=True, + data_config=data_config), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args, + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + dict(type='LoadAnnotations'), + 
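+    # ObjectSample pastes ground-truth boxes and their points from the
+    # pre-built database (db_sampler / *_dbinfos_train.pkl above) into the
+    # scene; VelocityAug is new in this branch and presumably perturbs the
+    # ground-truth velocities as an extra augmentation for velocity
+    # regression.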
dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='VelocityAug'), + dict( + type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names), + dict(type='PointToMultiViewDepthFusion', downsample=1, + grid_config=grid_config), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d', + 'img_inputs', 'gt_depth', + 'gt_bboxes_ignore' + ]) +] + +test_pipeline = [ + dict( + type='PrepareImageInputs', + is_train=False, opencv_pp=True, + data_config=data_config), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args, + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), + dict(type='PointToMultiViewDepthFusion', downsample=1, + grid_config=grid_config), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points', 'img_inputs', 'gt_depth']) + ]) +] + +input_modality = dict( + use_lidar=True, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) + +data = dict( + samples_per_gpu=4, + workers_per_gpu=6, + train=dict( + type='CBGSDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'bevdetv5-nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + use_valid_flag=True, + modality=input_modality, + img_info_prototype='bevdet', + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR')), + val=dict(pipeline=test_pipeline, classes=class_names, + modality=input_modality, + ann_file=data_root + 'bevdetv5-nuscenes_infos_val.pkl', + img_info_prototype='bevdet'), + test=dict(pipeline=test_pipeline, classes=class_names, + modality=input_modality, + ann_file=data_root + 'bevdetv5-nuscenes_infos_val.pkl', + img_info_prototype='bevdet')) + + +evaluation = dict(interval=20, pipeline=test_pipeline) +optimizer = dict(type='AdamW', lr=2e-4, weight_decay=0.01) # for 64 total batch size +two_stage = True +runner = dict(type='TwoStageRunner', max_epochs=20) + +num_proposals_test = 400 \ No newline at end of file diff --git a/configs/dal/dal-tiny.py b/configs/dal/dal-tiny.py new file mode 100644 index 00000000..a7a29c31 --- /dev/null +++ b/configs/dal/dal-tiny.py @@ -0,0 +1,379 @@ +_base_ = [ + '../_base_/datasets/nus-3d.py', + '../_base_/schedules/cyclic_20e.py', + '../_base_/default_runtime.py' +] +''' + +''' +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -3.0, 51.2, 51.2, 5.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'] + +data_config = { + 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', + 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], + 'Ncams': 5, + 'input_size': (192, 544), + 'src_size': (900, 1600), + + # Augmentation + 'resize': (-0.06, 0.44), + 'rot': (-5.4, 5.4), + 'flip': True, + 'crop_h': (0.0, 0.0), + 'random_crop_height': True, + 'vflip':True, + 'resize_test': 0.04, + + 'pmd': dict( + brightness_delta=32, + contrast_lower=0.5, + contrast_upper=1.5, + saturation_lower=0.5, + saturation_upper=1.5, + hue_delta=18, + rate=0.5 + ) +} + +grid_config = { + 'x': [-51.2, 51.2, 0.8], + 'y': [-51.2, 51.2, 0.8], + 'z': [-3, 5, 8], + 'depth': [1.0, 60.0, 0.5], +} + +# Model +voxel_size = [0.1, 0.1, 0.2] + +feat_bev_img_dim = 32 +img_feat_dim = 128 +model = dict( + type='DAL', + use_grid_mask=True, + # camera + img_backbone=dict( + pretrained="/mnt/cfs/algorithm/junjie.huang/models/resnet18-f37072fd.pth", + type='ResNet', + depth=18, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + style='pytorch'), + img_neck=dict( + type='CustomFPN', + in_channels=[128, 256, 512], + out_channels=img_feat_dim, + num_outs=1, + start_level=0, + out_ids=[0]), + img_view_transformer=dict( + type='LSSViewTransformer', + grid_config=grid_config, + input_size=data_config['input_size'], + in_channels=img_feat_dim, + out_channels=feat_bev_img_dim, + downsample=8, + with_depth_from_lidar=True), + + # lidar + pts_voxel_layer=dict( + max_num_points=10, voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + max_voxels=(90000, 120000)), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[41, 1024, 1024], + output_channels=128, + order=('conv', 'norm', 'act'), + encoder_channels=((16, 16, 32), + (32, 32, 64), + (64, 64, 128), + (128, 128)), + encoder_paddings=((0, 0, 1), + (0, 0, 1), + (0, 0, [0, 1, 1]), + (0, 0)), + block_type='basicblock'), + pts_backbone=dict( + type='SECOND', + in_channels=256, + out_channels=[128, 256], + layer_nums=[5, 5], + layer_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + 
conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + out_channels=[256, 256], + upsample_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + + # head + pts_bbox_head=dict( + type='DALHead', + + # DAL + feat_bev_img_dim=feat_bev_img_dim, + img_feat_dim=img_feat_dim, + sparse_fuse_layers=2, + dense_fuse_layers=2, + instance_attn=False, + + # Transfusion + num_proposals=200, + in_channels=512, + hidden_channel=128, + num_classes=10, + num_decoder_layers=1, + num_heads=8, + nms_kernel_size=3, + ffn_channel=256, + dropout=0.1, + bn_momentum=0.1, + activation='relu', + auxiliary=True, + common_heads=dict( + center=[2, 2], + height=[1, 2], + dim=[3, 2], + rot=[2, 2], + vel=[2, 2]), + bbox_coder=dict( + type='TransFusionBBoxCoder', + pc_range=point_cloud_range[:2], + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + score_threshold=0.0, + out_size_factor=8, + voxel_size=voxel_size[:2], + code_size=10), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0), + loss_heatmap=dict( + type='GaussianFocalLoss', reduction='mean', loss_weight=1.0), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25)), + train_cfg=dict(pts=dict( + dataset='nuScenes', + point_cloud_range=point_cloud_range, + grid_size=[1024, 1024, 40], + voxel_size=voxel_size, + out_size_factor=8, + gaussian_overlap=0.1, + min_radius=2, + pos_weight=-1, + code_weights=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 + ], + assigner=dict( + type='HungarianAssigner3D', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + cls_cost=dict( + type='FocalLossCost', + gamma=2.0, + alpha=0.25, + weight=0.15), + reg_cost=dict(type='BBoxBEVL1Cost', weight=0.25), + iou_cost=dict(type='IoU3DCost', weight=0.25)))), + test_cfg=dict(pts=dict( + dataset='nuScenes', + grid_size=[1024, 1024, 40], + img_feat_downsample=8, + out_size_factor=8, + voxel_size=voxel_size[:2], + pc_range=point_cloud_range[:2], + nms_type=None)), +) + +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +file_client_args = dict(backend='disk') + +bda_aug_conf = dict( + rot_lim=(-22.5 * 2, 22.5 * 2), + scale_lim=(0.9, 1.1), + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + tran_lim=[0.5, 0.5, 0.5] +) + +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'bevdetv5-nuscenes_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict( + car=5, + truck=5, + bus=5, + trailer=5, + construction_vehicle=5, + traffic_cone=5, + barrier=5, + motorcycle=5, + bicycle=5, + pedestrian=5)), + classes=class_names, + sample_groups=dict( + car=2, + truck=3, + construction_vehicle=7, + bus=4, + trailer=6, + barrier=2, + motorcycle=6, + bicycle=6, + pedestrian=2, + traffic_cone=2), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + +train_pipeline = [ + dict( + type='PrepareImageInputs', + is_train=True, opencv_pp=True, + data_config=data_config), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args, + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + 
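+    # ToEgo presumably moves the aggregated sweeps from the lidar frame into
+    # the ego frame, consistent with dev2.0 centring the BEV receptive field
+    # at the ego origin rather than at the lidar origin.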
dict(type='LoadAnnotations'), + dict(type='ObjectSample', db_sampler=db_sampler), + dict(type='VelocityAug'), + dict( + type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names), + dict(type='PointToMultiViewDepthFusion', downsample=1, + grid_config=grid_config), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d', + 'img_inputs', 'gt_depth', + 'gt_bboxes_ignore' + ]) +] + +test_pipeline = [ + dict( + type='PrepareImageInputs', + is_train=False, opencv_pp=True, + data_config=data_config), + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=9, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args, + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + dict(type='LoadAnnotations'), + dict(type='BEVAug', + bda_aug_conf=bda_aug_conf, + classes=class_names, + is_train=False), + dict(type='PointToMultiViewDepthFusion', downsample=1, + grid_config=grid_config), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points', 'img_inputs', 'gt_depth']) + ]) +] + +input_modality = dict( + use_lidar=True, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) + +data = dict( + samples_per_gpu=4, # for 16 GPU + workers_per_gpu=6, + train=dict( + type='CBGSDataset', + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'bevdetv5-nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + use_valid_flag=True, + modality=input_modality, + img_info_prototype='bevdet', + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR')), + val=dict(pipeline=test_pipeline, classes=class_names, + modality=input_modality, + ann_file=data_root + 'bevdetv5-nuscenes_infos_val.pkl', + img_info_prototype='bevdet'), + test=dict(pipeline=test_pipeline, classes=class_names, + modality=input_modality, + ann_file=data_root + 'bevdetv5-nuscenes_infos_val.pkl', + img_info_prototype='bevdet')) + +evaluation = dict(interval=20, pipeline=test_pipeline) +optimizer = dict(type='AdamW', lr=2e-4, weight_decay=0.01) # for 64 total batch size +two_stage = True +runner = dict(type='TwoStageRunner', max_epochs=20) + diff --git a/docs/en/news.md b/docs/en/news.md index 23a86a33..656737cf 100644 --- a/docs/en/news.md +++ b/docs/en/news.md @@ -1,5 +1,9 @@ ## News - +- **2023.05.07** Improve the occpancy baseline by enlarging the input size and using long-term temporal fusion. +- **2023.04.28** Support task of [occupancy prediction](https://github.com/CVPR2023-3D-Occupancy-Prediction/CVPR2023-3D-Occupancy-Prediction) . +- **2023.04.27** Equip BEVDet with stereo depth estimation. 
+- **2023.04.10** Use single head for multi-class prediction. +- **2023.01.12** Support TensorRT-INT8. * **2022.11.24** A new branch of bevdet codebase, dubbed dev2.0, is released. dev2.0 includes the following features: 1. support **BEVPoolv2**, whose inference speed is up to **15.1 times** the previous fastest implementation of Lift-Splat-Shoot view transformer. It is also far less memory consumption. ![bevpoolv2](../../resources/bevpoolv2.png) diff --git a/mmdet3d/core/__init__.py b/mmdet3d/core/__init__.py index d40e6502..fc98b766 100644 --- a/mmdet3d/core/__init__.py +++ b/mmdet3d/core/__init__.py @@ -8,3 +8,4 @@ from .utils import * # noqa: F401, F403 from .visualizer import * # noqa: F401, F403 from .voxel import * # noqa: F401, F403 +from .two_stage_runner import * \ No newline at end of file diff --git a/mmdet3d/core/bbox/assigners/__init__.py b/mmdet3d/core/bbox/assigners/__init__.py index d1493687..069f903d 100644 --- a/mmdet3d/core/bbox/assigners/__init__.py +++ b/mmdet3d/core/bbox/assigners/__init__.py @@ -1,4 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. from mmdet.core.bbox import AssignResult, BaseAssigner, MaxIoUAssigner - -__all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult'] +from .hungarian_assigner_3d import HungarianAssigner3D +__all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult', + 'HungarianAssigner3D'] diff --git a/mmdet3d/core/bbox/assigners/hungarian_assigner_3d.py b/mmdet3d/core/bbox/assigners/hungarian_assigner_3d.py new file mode 100644 index 00000000..6df57fca --- /dev/null +++ b/mmdet3d/core/bbox/assigners/hungarian_assigner_3d.py @@ -0,0 +1,148 @@ +from mmdet.core.bbox.builder import BBOX_ASSIGNERS +from mmdet.core.bbox.assigners import AssignResult, BaseAssigner +from mmdet.core.bbox.match_costs import build_match_cost +from mmdet.core.bbox.match_costs.builder import MATCH_COST +from mmdet.core.bbox.iou_calculators import build_iou_calculator +import torch + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + +@MATCH_COST.register_module() +class BBoxBEVL1Cost(object): + def __init__(self, weight): + self.weight = weight + + def __call__(self, bboxes, gt_bboxes, train_cfg): + pc_start = bboxes.new(train_cfg['point_cloud_range'][0:2]) + pc_range = bboxes.new(train_cfg['point_cloud_range'][3:5]) - bboxes.new(train_cfg['point_cloud_range'][0:2]) + # normalize the box center to [0, 1] + normalized_bboxes_xy = (bboxes[:, :2] - pc_start) / pc_range + normalized_gt_bboxes_xy = (gt_bboxes[:, :2] - pc_start) / pc_range + reg_cost = torch.cdist(normalized_bboxes_xy, normalized_gt_bboxes_xy, p=1) + return reg_cost * self.weight + + +@MATCH_COST.register_module() +class IoU3DCost(object): + def __init__(self, weight): + self.weight = weight + + def __call__(self, iou): + iou_cost = - iou + return iou_cost * self.weight + + +@BBOX_ASSIGNERS.register_module() +class HeuristicAssigner3D(BaseAssigner): + def __init__(self, + dist_thre=100, + iou_calculator=dict(type='BboxOverlaps3D') + ): + self.dist_thre = dist_thre # distance in meter + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None, query_labels=None): + dist_thre = self.dist_thre + num_gts, num_bboxes = len(gt_bboxes), len(bboxes) + + bev_dist = torch.norm(bboxes[:, 0:2][None, :, :] - gt_bboxes[:, 0:2][:, None, :], dim=-1) # [num_gts, num_bboxes] + if query_labels is not None: + # only match the gt box and query with same category + not_same_class 
= (query_labels[None] != gt_labels[:, None]) + bev_dist += not_same_class * dist_thre + + # for each gt box, assign it to the nearest pred box + nearest_values, nearest_indices = bev_dist.min(1) # [num_gts] + assigned_gt_inds = torch.ones([num_bboxes, ]).to(bboxes) * 0 + assigned_gt_vals = torch.ones([num_bboxes, ]).to(bboxes) * 10000 + assigned_gt_labels = torch.ones([num_bboxes, ]).to(bboxes) * -1 + for idx_gts in range(num_gts): + # for idx_pred in torch.where(bev_dist[idx_gts] < dist_thre)[0]: # each gt match to all the pred box within some radius + idx_pred = nearest_indices[idx_gts] # each gt only match to the nearest pred box + if bev_dist[idx_gts, idx_pred] <= dist_thre: + if bev_dist[idx_gts, idx_pred] < assigned_gt_vals[idx_pred]: # if this pred box is assigned, then compare + assigned_gt_vals[idx_pred] = bev_dist[idx_gts, idx_pred] + assigned_gt_inds[idx_pred] = idx_gts + 1 # for AssignResult, 0 is negative, -1 is ignore, 1-based indices are positive + assigned_gt_labels[idx_pred] = gt_labels[idx_gts] + + max_overlaps = torch.zeros([num_bboxes, ]).to(bboxes) + matched_indices = torch.where(assigned_gt_inds > 0) + matched_iou = self.iou_calculator(gt_bboxes[assigned_gt_inds[matched_indices].long() - 1], bboxes[matched_indices]).diag() + max_overlaps[matched_indices] = matched_iou + + return AssignResult( + num_gts, assigned_gt_inds.long(), max_overlaps, labels=assigned_gt_labels + ) + + +@BBOX_ASSIGNERS.register_module() +class HungarianAssigner3D(BaseAssigner): + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxBEVL1Cost', weight=1.0), + iou_cost=dict(type='IoU3DCost', weight=1.0), + iou_calculator=dict(type='BboxOverlaps3D'), + ): + self.cls_cost = build_match_cost(cls_cost) + self.reg_cost = build_match_cost(reg_cost) + self.iou_cost = build_match_cost(iou_cost) + self.iou_calculator = build_iou_calculator(iou_calculator) + + def assign(self, bboxes, gt_bboxes, gt_labels, cls_pred, train_cfg): + num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bboxes.new_full((num_bboxes,), + -1, + dtype=torch.long) + assigned_labels = bboxes.new_full((num_bboxes,), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + + # 2. compute the weighted costs + # see mmdetection/mmdet/core/bbox/match_costs/match_cost.py + cls_cost = self.cls_cost(cls_pred[0].T, gt_labels) + reg_cost = self.reg_cost(bboxes, gt_bboxes, train_cfg) + + iou = self.iou_calculator(bboxes, gt_bboxes) + iou_cost = self.iou_cost(iou) + + # weighted sum of above three costs + cost = cls_cost + reg_cost + iou_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + try: + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + except: + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + matched_row_inds = torch.from_numpy(matched_row_inds).to(bboxes.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to(bboxes.device) + + # 4. 
assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + + max_overlaps = torch.zeros_like(iou.max(1).values) + max_overlaps[matched_row_inds] = iou[matched_row_inds, matched_col_inds] + # max_overlaps = iou.max(1).values + return AssignResult( + num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/mmdet3d/core/bbox/coders/__init__.py b/mmdet3d/core/bbox/coders/__init__.py index b306525c..bed2c604 100644 --- a/mmdet3d/core/bbox/coders/__init__.py +++ b/mmdet3d/core/bbox/coders/__init__.py @@ -10,10 +10,11 @@ from .pgd_bbox_coder import PGDBBoxCoder from .point_xyzwhlr_bbox_coder import PointXYZWHLRBBoxCoder from .smoke_bbox_coder import SMOKECoder +from .transfusion_bbox_coder import TransFusionBBoxCoder __all__ = [ 'build_bbox_coder', 'DeltaXYZWLHRBBoxCoder', 'PartialBinBasedBBoxCoder', 'CenterPointBBoxCoder', 'AnchorFreeBBoxCoder', 'GroupFree3DBBoxCoder', 'PointXYZWHLRBBoxCoder', 'FCOS3DBBoxCoder', 'PGDBBoxCoder', 'SMOKECoder', - 'MonoFlexCoder' + 'MonoFlexCoder', 'TransFusionBBoxCoder' ] diff --git a/mmdet3d/core/bbox/coders/transfusion_bbox_coder.py b/mmdet3d/core/bbox/coders/transfusion_bbox_coder.py new file mode 100644 index 00000000..0d495dbc --- /dev/null +++ b/mmdet3d/core/bbox/coders/transfusion_bbox_coder.py @@ -0,0 +1,124 @@ +import torch + +from mmdet.core.bbox import BaseBBoxCoder +from mmdet.core.bbox.builder import BBOX_CODERS + + +@BBOX_CODERS.register_module() +class TransFusionBBoxCoder(BaseBBoxCoder): + def __init__(self, + pc_range, + out_size_factor, + voxel_size, + post_center_range=None, + score_threshold=None, + code_size=8, + ): + self.pc_range = pc_range + self.out_size_factor = out_size_factor + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.score_threshold = score_threshold + self.code_size = code_size + + def encode(self, dst_boxes): + targets = torch.zeros([dst_boxes.shape[0], self.code_size]).to(dst_boxes.device) + targets[:, 0] = (dst_boxes[:, 0] - self.pc_range[0]) / (self.out_size_factor * self.voxel_size[0]) + targets[:, 1] = (dst_boxes[:, 1] - self.pc_range[1]) / (self.out_size_factor * self.voxel_size[1]) + # targets[:, 2] = (dst_boxes[:, 2] - self.post_center_range[2]) / (self.post_center_range[5] - self.post_center_range[2]) + targets[:, 3] = dst_boxes[:, 3].log() + targets[:, 4] = dst_boxes[:, 4].log() + targets[:, 5] = dst_boxes[:, 5].log() + targets[:, 2] = dst_boxes[:, 2] + dst_boxes[:, 5] * 0.5 # bottom center to gravity center + targets[:, 6] = torch.sin(dst_boxes[:, 6]) + targets[:, 7] = torch.cos(dst_boxes[:, 6]) + if self.code_size == 10: + targets[:, 8:10] = dst_boxes[:, 7:] + return targets + + def decode(self, heatmap, rot, dim, center, height, vel, filter=False): + """Decode bboxes. + Args: + heat (torch.Tensor): Heatmap with the shape of [B, num_cls, num_proposals]. + rot (torch.Tensor): Rotation with the shape of + [B, 1, num_proposals]. + dim (torch.Tensor): Dim of the boxes with the shape of + [B, 3, num_proposals]. + center (torch.Tensor): bev center of the boxes with the shape of + [B, 2, num_proposals]. (in feature map metric) + hieght (torch.Tensor): height of the boxes with the shape of + [B, 2, num_proposals]. (in real world metric) + vel (torch.Tensor): Velocity with the shape of [B, 2, num_proposals]. 
+ filter: if False, return all box without checking score and center_range + Returns: + list[dict]: Decoded boxes. + """ + # class label + final_preds = heatmap.max(1, keepdims=False).indices + final_scores = heatmap.max(1, keepdims=False).values + + # change size to real world metric + center[:, 0, :] = center[:, 0, :] * self.out_size_factor * self.voxel_size[0] + self.pc_range[0] + center[:, 1, :] = center[:, 1, :] * self.out_size_factor * self.voxel_size[1] + self.pc_range[1] + # center[:, 2, :] = center[:, 2, :] * (self.post_center_range[5] - self.post_center_range[2]) + self.post_center_range[2] + dim[:, 0, :] = dim[:, 0, :].exp() + dim[:, 1, :] = dim[:, 1, :].exp() + dim[:, 2, :] = dim[:, 2, :].exp() + height = height - dim[:, 2:3, :] * 0.5 # gravity center to bottom center + rots, rotc = rot[:, 0:1, :], rot[:, 1:2, :] + rot = torch.atan2(rots, rotc) + + if vel is None: + final_box_preds = torch.cat([center, height, dim, rot], dim=1).permute(0, 2, 1) + else: + final_box_preds = torch.cat([center, height, dim, rot, vel], dim=1).permute(0, 2, 1) + + predictions_dicts = [] + for i in range(heatmap.shape[0]): + boxes3d = final_box_preds[i] + scores = final_scores[i] + labels = final_preds[i] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + predictions_dicts.append(predictions_dict) + + if filter is False: + return predictions_dicts + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=heatmap.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(2) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(2) + + predictions_dicts = [] + for i in range(heatmap.shape[0]): + cmask = mask[i, :] + if self.score_threshold: + cmask &= thresh_mask[i] + + boxes3d = final_box_preds[i, cmask] + scores = final_scores[i, cmask] + labels = final_preds[i, cmask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + predictions_dicts.append(predictions_dict) + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + + return predictions_dicts \ No newline at end of file diff --git a/mmdet3d/core/two_stage_runner.py b/mmdet3d/core/two_stage_runner.py new file mode 100644 index 00000000..8a404318 --- /dev/null +++ b/mmdet3d/core/two_stage_runner.py @@ -0,0 +1,82 @@ +import time +import warnings + +import mmcv +from mmcv.runner.utils import get_host_info +from mmcv.runner import EpochBasedRunner +from mmcv.runner.builder import RUNNERS + + +@RUNNERS.register_module() +class TwoStageRunner(EpochBasedRunner): + def __init__(self, first_stage_ratio=0.75, **kwargs): + super(TwoStageRunner, self).__init__(**kwargs) + self.switch_epoch = first_stage_ratio * self._max_epochs + + def run(self, data_loaders, workflow, max_epochs=None, **kwargs): + """Start running. + + Args: + data_loaders (list[:obj:`DataLoader`]): Dataloaders for training + and validation. + workflow (list[tuple]): A list of (phase, epochs) to specify the + running order and epochs. E.g, [('train', 2), ('val', 1)] means + running 2 epochs for training and 1 epoch for validation, + iteratively. 
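+
+        Example (a minimal sketch; the loader and runner names are
+        illustrative, not part of this patch)::
+
+            # With first_stage_ratio=0.75 and 20 max_epochs, batches come
+            # from data_loaders[0] for the first 15 epochs and are expected
+            # to come from data_loaders[1] afterwards (see switch_epoch).
+            runner.run([loader_stage1, loader_stage2], [('train', 1)])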
+ """ + assert isinstance(data_loaders, list) + assert len(data_loaders) == 2 + assert mmcv.is_list_of(workflow, tuple) + assert len(workflow) == 1 + if max_epochs is not None: + warnings.warn( + 'setting max_epochs in run is deprecated, ' + 'please set max_epochs in runner_config', DeprecationWarning) + self._max_epochs = max_epochs + + assert self._max_epochs is not None, ( + 'max_epochs must be specified during instantiation') + + for i, flow in enumerate(workflow): + mode, epochs = flow + if mode == 'train': + self._max_iters = self._max_epochs * len(data_loaders[i]) + break + + work_dir = self.work_dir if self.work_dir is not None else 'NONE' + self.logger.info('Start running, host: %s, work_dir: %s', + get_host_info(), work_dir) + self.logger.info('Hooks will be executed in the following order:\n%s', + self.get_hook_info()) + self.logger.info('workflow: %s, max: %d epochs', workflow, + self._max_epochs) + self.call_hook('before_run') + + while self.epoch < self._max_epochs: + for i, flow in enumerate(workflow): + mode, epochs = flow + if isinstance(mode, str): # self.train() + if not hasattr(self, mode): + raise ValueError( + f'runner has no method named "{mode}" to run an ' + 'epoch') + epoch_runner = getattr(self, mode) + else: + raise TypeError( + 'mode in workflow must be a str, but got {}'.format( + type(mode))) + + for _ in range(epochs): + if mode == 'train' and self.epoch >= self._max_epochs: + break + data_loader_curr = data_loaders[0] if self.epochpmd.get('rate', 1.0): + return img + + img = np.array(img).astype(np.float32) + assert img.dtype == np.float32, \ + 'PhotoMetricDistortion needs the input image of dtype np.float32,' \ + ' please set "to_float32=True" in "LoadImageFromFile" pipeline' + # random brightness + if np.random.randint(2): + delta = np.random.uniform(-pmd['brightness_delta'], + pmd['brightness_delta']) + img += delta + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = np.random.randint(2) + if mode == 1: + if np.random.randint(2): + alpha = np.random.uniform(pmd['contrast_lower'], + pmd['contrast_upper']) + img *= alpha + + # convert color from BGR to HSV + img = mmcv.bgr2hsv(img) + + # random saturation + if np.random.randint(2): + img[..., 1] *= np.random.uniform(pmd['saturation_lower'], + pmd['saturation_upper']) + + # random hue + if np.random.randint(2): + img[..., 0] += np.random.uniform(-pmd['hue_delta'], pmd['hue_delta']) + img[..., 0][img[..., 0] > 360] -= 360 + img[..., 0][img[..., 0] < 0] += 360 + + # convert color from HSV to BGR + img = mmcv.hsv2bgr(img) + + # random contrast + if mode == 0: + if np.random.randint(2): + alpha = np.random.uniform(pmd['contrast_lower'], + pmd['contrast_upper']) + img *= alpha + + # randomly swap channels + if np.random.randint(2): + img = img[..., np.random.permutation(3)] + return Image.fromarray(img.astype(np.uint8)) + def get_inputs(self, results, flip=None, scale=None): imgs = [] sensor2egos = [] @@ -973,6 +1098,9 @@ def get_inputs(self, results, flip=None, scale=None): post_tran[:2] = post_tran2 post_rot[:2, :2] = post_rot2 + if self.is_train and self.data_config.get('pmd', None) is not None: + img = self.photo_metric_distortion(img, self.data_config['pmd']) + canvas.append(np.array(img)) imgs.append(self.normalize_img(img)) @@ -981,12 +1109,20 @@ def get_inputs(self, results, flip=None, scale=None): for adj_info in results['adjacent']: filename_adj = adj_info['cams'][cam_name]['data_path'] img_adjacent = Image.open(filename_adj) - img_adjacent = 
self.img_transform_core( - img_adjacent, - resize_dims=resize_dims, - crop=crop, - flip=flip, - rotate=rotate) + if self.opencv_pp: + img_adjacent = \ + self.img_transform_core_opencv( + img_adjacent, + post_rot[:2, :2], + post_tran[:2], + crop) + else: + img_adjacent = self.img_transform_core( + img_adjacent, + resize_dims=resize_dims, + crop=crop, + flip=flip, + rotate=rotate) imgs.append(self.normalize_img(img_adjacent)) intrins.append(intrin) sensor2egos.append(sensor2ego) @@ -1023,7 +1159,22 @@ def __call__(self, results): @PIPELINES.register_module() -class LoadAnnotationsBEVDepth(object): +class LoadAnnotations(object): + + def __call__(self, results): + gt_boxes, gt_labels = results['ann_infos'] + gt_boxes, gt_labels = torch.Tensor(gt_boxes), torch.tensor(gt_labels) + if len(gt_boxes) == 0: + gt_boxes = torch.zeros(0, 9) + results['gt_bboxes_3d'] = \ + LiDARInstance3DBoxes(gt_boxes, box_dim=gt_boxes.shape[-1], + origin=(0.5, 0.5, 0.5)) + results['gt_labels_3d'] = gt_labels + return results + + +@PIPELINES.register_module() +class BEVAug(object): def __init__(self, bda_aug_conf, classes, is_train=True): self.bda_aug_conf = bda_aug_conf @@ -1037,15 +1188,18 @@ def sample_bda_augmentation(self): scale_bda = np.random.uniform(*self.bda_aug_conf['scale_lim']) flip_dx = np.random.uniform() < self.bda_aug_conf['flip_dx_ratio'] flip_dy = np.random.uniform() < self.bda_aug_conf['flip_dy_ratio'] + translation_std = self.bda_aug_conf.get('tran_lim', [0.0, 0.0, 0.0]) + tran_bda = np.random.normal(scale=translation_std, size=3).T else: rotate_bda = 0 scale_bda = 1.0 flip_dx = False flip_dy = False - return rotate_bda, scale_bda, flip_dx, flip_dy + tran_bda = np.zeros((1, 3), dtype=np.float32) + return rotate_bda, scale_bda, flip_dx, flip_dy, tran_bda def bev_transform(self, gt_boxes, rotate_angle, scale_ratio, flip_dx, - flip_dy): + flip_dy, tran_bda): rotate_angle = torch.tensor(rotate_angle / 180 * np.pi) rot_sin = torch.sin(rotate_angle) rot_cos = torch.cos(rotate_angle) @@ -1074,28 +1228,36 @@ def bev_transform(self, gt_boxes, rotate_angle, scale_ratio, flip_dx, gt_boxes[:, 6] = -gt_boxes[:, 6] gt_boxes[:, 7:] = ( rot_mat[:2, :2] @ gt_boxes[:, 7:].unsqueeze(-1)).squeeze(-1) + gt_boxes[:, :3] = gt_boxes[:, :3] + tran_bda return gt_boxes, rot_mat def __call__(self, results): - gt_boxes, gt_labels = results['ann_infos'] - gt_boxes, gt_labels = torch.Tensor(gt_boxes), torch.tensor(gt_labels) - rotate_bda, scale_bda, flip_dx, flip_dy = self.sample_bda_augmentation( - ) + gt_boxes = results['gt_bboxes_3d'].tensor + gt_boxes[:,2] = gt_boxes[:,2] + 0.5*gt_boxes[:,5] + rotate_bda, scale_bda, flip_dx, flip_dy, tran_bda = \ + self.sample_bda_augmentation() bda_mat = torch.zeros(4, 4) bda_mat[3, 3] = 1 gt_boxes, bda_rot = self.bev_transform(gt_boxes, rotate_bda, scale_bda, - flip_dx, flip_dy) + flip_dx, flip_dy, tran_bda) + if 'points' in results: + points = results['points'].tensor + points_aug = (bda_rot @ points[:, :3].unsqueeze(-1)).squeeze(-1) + points[:,:3] = points_aug + tran_bda + points = results['points'].new_point(points) + results['points'] = points bda_mat[:3, :3] = bda_rot + bda_mat[:3, 3] = torch.from_numpy(tran_bda) if len(gt_boxes) == 0: gt_boxes = torch.zeros(0, 9) results['gt_bboxes_3d'] = \ LiDARInstance3DBoxes(gt_boxes, box_dim=gt_boxes.shape[-1], origin=(0.5, 0.5, 0.5)) - results['gt_labels_3d'] = gt_labels - imgs, rots, trans, intrins = results['img_inputs'][:4] - post_rots, post_trans = results['img_inputs'][4:] - results['img_inputs'] = (imgs, rots, trans, intrins, post_rots, - 
post_trans, bda_rot) + if 'img_inputs' in results: + imgs, rots, trans, intrins = results['img_inputs'][:4] + post_rots, post_trans = results['img_inputs'][4:] + results['img_inputs'] = (imgs, rots, trans, intrins, post_rots, + post_trans, bda_mat) if 'voxel_semantics' in results: if flip_dx: results['voxel_semantics'] = results['voxel_semantics'][::-1,...].copy() diff --git a/mmdet3d/datasets/pipelines/transforms_3d.py b/mmdet3d/datasets/pipelines/transforms_3d.py index d2dc0760..a960dd38 100644 --- a/mmdet3d/datasets/pipelines/transforms_3d.py +++ b/mmdet3d/datasets/pipelines/transforms_3d.py @@ -4,6 +4,7 @@ import cv2 import numpy as np +from pyquaternion.quaternion import Quaternion from mmcv import is_tuple_of from mmcv.utils import build_from_cfg @@ -493,7 +494,7 @@ def __call__(self, input_dict): gt_labels_3d, img=None, ground_plane=ground_plane) - + num_exist = gt_labels_3d.shape[0] if sampled_dict is not None: sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d'] sampled_points = sampled_dict['points'] @@ -516,7 +517,10 @@ def __call__(self, input_dict): input_dict['gt_bboxes'] = gt_bboxes_2d input_dict['img'] = sampled_dict['img'] - + gt_bboxes_ignore = np.ones_like(gt_labels_3d) + gt_bboxes_ignore[num_exist:] = 0 + gt_bboxes_ignore = gt_bboxes_ignore.astype(np.bool) + input_dict['gt_bboxes_ignore'] = gt_bboxes_ignore input_dict['gt_bboxes_3d'] = gt_bboxes_3d input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.int64) input_dict['points'] = points @@ -917,6 +921,11 @@ def __call__(self, input_dict): gt_bboxes_3d = input_dict['gt_bboxes_3d'] gt_labels_3d = input_dict['gt_labels_3d'] mask = gt_bboxes_3d.in_range_bev(bev_range) + + if 'gt_bboxes_ignore' in input_dict: + gt_bboxes_ignore = input_dict['gt_bboxes_ignore'] + gt_bboxes_ignore = gt_bboxes_ignore[mask.numpy().astype(np.bool)] + input_dict['gt_bboxes_ignore'] = gt_bboxes_ignore gt_bboxes_3d = gt_bboxes_3d[mask] # mask is a torch tensor but gt_labels_3d is still numpy array # using mask to index gt_labels_3d will cause bug when @@ -1010,7 +1019,10 @@ def __call__(self, input_dict): dtype=np.bool_) input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask] input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask] - + if 'gt_bboxes_ignore' in input_dict: + gt_bboxes_ignore = input_dict['gt_bboxes_ignore'] + gt_bboxes_ignore = gt_bboxes_ignore[gt_bboxes_mask] + input_dict['gt_bboxes_ignore'] = gt_bboxes_ignore return input_dict def __repr__(self): @@ -1851,3 +1863,201 @@ def __repr__(self): repr_str += f'(shift_scale={self.shift_scale}, ' repr_str += f'aug_prob={self.aug_prob}) ' return repr_str + + +@PIPELINES.register_module() +class ToEgo(object): + def __init__(self, ego_cam='CAM_FRONT',): + self.ego_cam=ego_cam + + def __call__(self, results): + lidar2lidarego = np.eye(4, dtype=np.float32) + lidar2lidarego[:3, :3] = Quaternion( + results['curr']['lidar2ego_rotation']).rotation_matrix + lidar2lidarego[:3, 3] = results['curr']['lidar2ego_translation'] + + lidarego2global = np.eye(4, dtype=np.float32) + lidarego2global[:3, :3] = Quaternion( + results['curr']['ego2global_rotation']).rotation_matrix + lidarego2global[:3, 3] = results['curr']['ego2global_translation'] + + camego2global = np.eye(4, dtype=np.float32) + camego2global[:3, :3] = Quaternion( + results['curr']['cams'][self.ego_cam] + ['ego2global_rotation']).rotation_matrix + camego2global[:3, 3] = results['curr']['cams'][self.ego_cam][ + 'ego2global_translation'] + lidar2camego = np.linalg.inv(camego2global) @ lidarego2global @ lidar2lidarego + 
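+        # The chain above maps LiDAR-frame coordinates into the ego frame of
+        # the reference camera (ego_cam, 'CAM_FRONT' by default):
+        #     lidar -> lidar-ego -> global -> camera-ego
+        # Illustrative check on a single homogeneous point (values made up):
+        #     p_lidar = np.array([10.0, 0.0, 1.0, 1.0])
+        #     p_camego = lidar2camego @ p_lidar   # point in camera-ego frame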
+ points = results['points'].tensor.numpy() + points_ego = lidar2camego[:3,:3].reshape(1, 3, 3) @ \ + points[:, :3].reshape(-1, 3, 1) + \ + lidar2camego[:3, 3].reshape(1, 3, 1) + points[:, :3] = points_ego.squeeze(-1) + points = results['points'].new_point(points) + results['points'] = points + return results + + +@PIPELINES.register_module() +class VelocityAug(object): + def __init__(self, rate=0.5, rate_vy=0.2, rate_rotation=-1, speed_range=None, thred_vy_by_vx=1.0, + ego_cam='CAM_FRONT'): + # must be identical to that in tools/create_data_bevdet.py + self.cls = ['car', 'truck', 'construction_vehicle', + 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', + 'pedestrian', 'traffic_cone'] + self.speed_range = dict( + car=[-10, 30, 6], + truck=[-10, 30, 6], + construction_vehicle=[-10, 30, 3], + bus=[-10, 30, 3], + trailer=[-10, 30, 3], + barrier=[-5, 5, 3], + motorcycle=[-2, 25, 3], + bicycle=[-2, 15, 2], + pedestrian=[-1, 10, 2] + ) if speed_range is None else speed_range + self.rate = rate + self.thred_vy_by_vx=thred_vy_by_vx + self.rate_vy = rate_vy + self.rate_rotation = rate_rotation + self.ego_cam = ego_cam + + def interpolating(self, vx, vy, delta_t, box, rot): + delta_t_max = np.max(delta_t) + if vy ==0 or vx == 0: + delta_x = delta_t*vx + delta_y = np.zeros_like(delta_x) + rotation_interpolated = np.zeros_like(delta_x) + else: + theta = np.arctan2(abs(vy), abs(vx)) + rotation = 2 * theta + radius = 0.5 * delta_t_max * np.sqrt(vx ** 2 + vy ** 2) / np.sin(theta) + rotation_interpolated = delta_t / delta_t_max * rotation + delta_y = radius - radius * np.cos(rotation_interpolated) + delta_x = radius * np.sin(rotation_interpolated) + if vy<0: + delta_y = - delta_y + if vx<0: + delta_x = - delta_x + if np.logical_xor(vx>0, vy>0): + rotation_interpolated = -rotation_interpolated + aug = np.zeros((delta_t.shape[0],3,3), dtype=np.float32) + aug[:, 2, 2] = 1. + sin = np.sin(-rotation_interpolated) + cos = np.cos(-rotation_interpolated) + aug[:,:2,:2] = np.stack([cos,sin,-sin,cos], axis=-1).reshape(delta_t.shape[0], 2, 2) + aug[:,:2, 2] = np.stack([delta_x, delta_y], axis=-1) + + corner2center = np.eye(3) + corner2center[0, 2] = -0.5 * box[3] + + instance2ego = np.eye(3) + yaw = -box[6] + s = np.sin(yaw) + c = np.cos(yaw) + instance2ego[:2,:2] = np.stack([c,s,-s,c]).reshape(2,2) + instance2ego[:2,2] = box[:2] + corner2ego = instance2ego @ corner2center + corner2ego = corner2ego[None, ...] + if not rot == 0: + t_rot = np.eye(3) + s_rot = np.sin(-rot) + c_rot = np.cos(-rot) + t_rot[:2,:2] = np.stack([c_rot, s_rot, -s_rot, c_rot]).reshape(2,2) + + instance2ego_ = np.eye(3) + yaw_ = -box[6] - rot + s_ = np.sin(yaw_) + c_ = np.cos(yaw_) + instance2ego_[:2, :2] = np.stack([c_, s_, -s_, c_]).reshape(2, 2) + instance2ego_[:2, 2] = box[:2] + corner2ego_ = instance2ego_ @ corner2center + corner2ego_ = corner2ego_[None, ...] + t_rot = instance2ego @ t_rot @ np.linalg.inv(instance2ego) + aug = corner2ego_ @ aug @ np.linalg.inv(corner2ego_) @ t_rot[None, ...] 
+ else: + aug = corner2ego @ aug @ np.linalg.inv(corner2ego) + return aug + + def __call__(self, results): + gt_boxes = results['gt_bboxes_3d'].tensor.numpy().copy() + gt_velocity = gt_boxes[:,7:] + gt_velocity_norm = np.sum(np.square(gt_velocity), axis=1) + points = results['points'].tensor.numpy().copy() + point_indices = box_np_ops.points_in_rbbox(points, gt_boxes) + + for bid in range(gt_boxes.shape[0]): + cls = self.cls[results['gt_labels_3d'][bid]] + points_all = points[point_indices[:, bid]] + delta_t = np.unique(points_all[:,4]) + aug_rate_cls = self.rate if isinstance(self.rate, float) else self.rate[cls] + if points_all.shape[0]==0 or \ + delta_t.shape[0]<3 or \ + gt_velocity_norm[bid]>0.01 or \ + cls not in self.speed_range or \ + np.random.rand() > aug_rate_cls: + continue + + # sampling speed vx,vy in instance coordinate + vx = np.random.rand() * (self.speed_range[cls][1] - + self.speed_range[cls][0]) + \ + self.speed_range[cls][0] + if np.random.rand() < self.rate_vy: + max_vy = min(self.speed_range[cls][2]*2, abs(vx) * self.thred_vy_by_vx) + vy = (np.random.rand()-0.5) * max_vy + else: + vy = 0.0 + vx = -vx + + # if points_all.shape[0] == 0 or cls not in self.speed_range or gt_velocity_norm[bid]>0.01 or delta_t.shape[0]<3: + # continue + # vx = 10 + # vy = -2. + + rot = 0.0 + if np.random.rand() < self.rate_rotation: + rot = (np.random.rand()-0.5) * 1.57 + + aug = self.interpolating(vx, vy, delta_t, gt_boxes[bid], rot) + + # update rotation + gt_boxes[bid, 6] += rot + + # update velocity + delta_t_max = np.max(delta_t) + delta_t_max_index = np.argmax(delta_t) + center = gt_boxes[bid:bid+1, :2] + center_aug = center @ aug[delta_t_max_index, :2, :2].T + aug[delta_t_max_index, :2, 2] + vel = (center - center_aug) / delta_t_max + gt_boxes[bid, 7:] = vel + + # update points + for fid in range(delta_t.shape[0]): + points_curr_frame_idxes = points_all[:,4] == delta_t[fid] + + points_all[points_curr_frame_idxes, :2] = \ + points_all[points_curr_frame_idxes, :2] @ aug[fid,:2,:2].T + aug[fid,:2, 2:3].T + points[point_indices[:, bid]] = points_all + + + results['points'] = results['points'].new_point(points) + results['gt_bboxes_3d'] = results['gt_bboxes_3d'].new_box(gt_boxes) + return results + + def adjust_adj_points(self, adj_points, point_indices_adj, bid, vx, vy, rot, gt_boxes_adj, info_adj, info): + ts_diff = info['timestamp'] / 1e6 - info_adj['timestamp'] / 1e6 + points = adj_points.tensor.numpy().copy() + points_all_adj = points[point_indices_adj[:, bid]] + if points_all_adj.size>0: + delta_t_adj = np.unique(points_all_adj[:, 4]) + ts_diff + aug = self.interpolating(vx, vy, delta_t_adj, gt_boxes_adj[bid], rot) + for fid in range(delta_t_adj.shape[0]): + points_curr_frame_idxes = points_all_adj[:, 4] == delta_t_adj[fid]- ts_diff + points_all_adj[points_curr_frame_idxes, :2] = \ + points_all_adj[points_curr_frame_idxes, :2] @ aug[fid, :2, :2].T + aug[fid, :2, 2:3].T + points[point_indices_adj[:, bid]] = points_all_adj + adj_points = adj_points.new_point(points) + return adj_points \ No newline at end of file diff --git a/mmdet3d/models/backbones/second.py b/mmdet3d/models/backbones/second.py index 680dbbec..d429051c 100644 --- a/mmdet3d/models/backbones/second.py +++ b/mmdet3d/models/backbones/second.py @@ -6,7 +6,7 @@ from torch import nn as nn from ..builder import BACKBONES - +from torch.utils.checkpoint import checkpoint @BACKBONES.register_module() class SECOND(BaseModule): @@ -28,6 +28,7 @@ def __init__(self, layer_strides=[2, 2, 2], norm_cfg=dict(type='BN', eps=1e-3, 
momentum=0.01), conv_cfg=dict(type='Conv2d', bias=False), + with_cp=False, init_cfg=None, pretrained=None): super(SECOND, self).__init__(init_cfg=init_cfg) @@ -74,6 +75,7 @@ def __init__(self, self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) else: self.init_cfg = dict(type='Kaiming', layer='Conv2d') + self.with_cp = with_cp def forward(self, x): """Forward function. @@ -86,6 +88,9 @@ def forward(self, x): """ outs = [] for i in range(len(self.blocks)): - x = self.blocks[i](x) + if self.with_cp: + x =checkpoint(self.blocks[i], x) + else: + x = self.blocks[i](x) outs.append(x) return tuple(outs) diff --git a/mmdet3d/models/dense_heads/__init__.py b/mmdet3d/models/dense_heads/__init__.py index 154b7c4a..dc1a34c0 100644 --- a/mmdet3d/models/dense_heads/__init__.py +++ b/mmdet3d/models/dense_heads/__init__.py @@ -4,6 +4,7 @@ from .base_conv_bbox_head import BaseConvBboxHead from .base_mono3d_dense_head import BaseMono3DDenseHead from .centerpoint_head import CenterHead +from .dal_head import DALHead from .fcaf3d_head import FCAF3DHead from .fcos_mono3d_head import FCOSMono3DHead from .free_anchor3d_head import FreeAnchor3DHead @@ -15,6 +16,7 @@ from .shape_aware_head import ShapeAwareHead from .smoke_mono3d_head import SMOKEMono3DHead from .ssd_3d_head import SSD3DHead +from .transfusion_head import TransFusionHead from .vote_head import VoteHead __all__ = [ diff --git a/mmdet3d/models/dense_heads/dal_head.py b/mmdet3d/models/dense_heads/dal_head.py new file mode 100644 index 00000000..664e05dd --- /dev/null +++ b/mmdet3d/models/dense_heads/dal_head.py @@ -0,0 +1,258 @@ +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule, kaiming_init, build_conv_layer +from mmcv.runner import force_fp32 +from torch import nn +from .transfusion_head import TransFusionHead +from mmdet3d.models.builder import HEADS +from .. 
import builder + +__all__ = ["DALHead"] + + +def clip_sigmoid(x, eps=1e-4): + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y + + +@HEADS.register_module() +class DALHead(TransFusionHead): + def __init__(self, + img_feat_dim=128, + feat_bev_img_dim=32, + sparse_fuse_layers=2, + dense_fuse_layers=2, + **kwargs): + super(DALHead, self).__init__(**kwargs) + + # fuse net for first stage dense prediction + cfg = dict( + type='CustomResNet', + numC_input=kwargs['hidden_channel'] + feat_bev_img_dim, + num_layer=[dense_fuse_layers+1, ], + num_channels=[kwargs['hidden_channel'], ], + stride=[1, ], + backbone_output_ids=[0, ]) + self.dense_heatmap_fuse_convs = builder.build_backbone(cfg) + + # fuse net for second stage sparse prediction + fuse_convs = [] + c_in = img_feat_dim + kwargs['hidden_channel'] + feat_bev_img_dim + for i in range(sparse_fuse_layers - 1): + fuse_convs.append( + ConvModule( + c_in, + c_in, + kernel_size=1, + stride=1, + padding=0, + bias='auto', + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type="BN1d"))) + fuse_convs.append( + ConvModule( + c_in, + kwargs['hidden_channel'], + kernel_size=1, + stride=1, + padding=0, + bias='auto', + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type="BN1d"))) + self.fuse_convs = nn.Sequential(*fuse_convs) + self._init_weights() + + def _init_weights(self): + for m in self.dense_heatmap_fuse_convs.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + + @force_fp32() + def extract_img_feat_from_3dpoints(self, points, img_inputs_list, fuse=True): + if not isinstance(img_inputs_list[0], list): + img_inputs_list = [img_inputs_list] + global2keyego = torch.inverse(img_inputs_list[0][2][:,0,:,:].unsqueeze(1).to(torch.float64)) + point_img_feat_list = [] + + b, p, _ = points.shape + points = points.view(b, 1, -1, 3, 1) + for img_inputs in img_inputs_list: + img_feats = img_inputs[0].permute(0, 2, 1, 3, 4).contiguous() + _, c, n, h, w = img_feats.shape + with torch.no_grad(): + + sensor2ego, ego2global, cam2imgs, post_rots, post_trans, bda = \ + img_inputs[1:] + currego2global = ego2global[:,0,:,:].unsqueeze(1).to(torch.float64) + currego2keyego = global2keyego.matmul(currego2global).to(torch.float32) + + # aug ego to cam + augego2cam = torch.inverse(bda.view(b, 1, 4, 4).matmul(currego2keyego).matmul(sensor2ego)) + augego2cam = augego2cam.view(b, -1, 1, 4, 4) + points_cam = augego2cam[..., :3, :3].matmul(points) + points_cam += augego2cam[:, :, :, :3, 3:4] + + valid = points_cam[..., 2, 0] > 0.5 + points_img = points_cam/points_cam[..., 2:3, :] + points_img = cam2imgs.view(b, -1, 1, 3, 3).matmul(points_img) + + points_img_x = points_img[..., 0, 0] + points_img_x = points_img_x * valid + select_cam_ids = \ + torch.argmin(torch.abs(points_img_x - + cam2imgs[:, :, 0, 2:3]), dim=1) + + points_img = post_rots.view(b, -1, 1, 3, 3).matmul(points_img) + \ + post_trans.view(b, -1, 1, 3, 1) + + points_img[..., 2, 0] = points_cam[..., 2, 0] + + points_img = points_img[..., :2, 0] + index = select_cam_ids[:, None, :, None].expand(-1, -1, -1, 2) + points_img_selected = \ + points_img.gather(index=index, dim=1).squeeze(1) + + # img space to feature space + points_img_selected /= self.test_cfg['img_feat_downsample'] + + grid = torch.cat([points_img_selected, + select_cam_ids.unsqueeze(-1)], dim=2) + + normalize_factor = torch.tensor([w - 1.0, h - 1.0, n - 1.0]).to(grid) + grid = grid / normalize_factor.view(1, 1, 3) * 2.0 - 1.0 + grid = grid.view(b, p, 1, 1, 3) + point_img_feat = \ + F.grid_sample(img_feats, grid, + mode='bilinear', + 
align_corners=True).view(b,c,p) + point_img_feat_list.append(point_img_feat) + if not fuse: + point_img_feat = point_img_feat_list[0] + else: + point_img_feat = point_img_feat_list + return point_img_feat + + def extract_instance_img_feat(self, res_layer, img_inputs, fuse=False): + center = res_layer["center"] + height = res_layer["height"] + center_x = center[:, 0:1, :] * self.bbox_coder.out_size_factor * \ + self.bbox_coder.voxel_size[0] + self.bbox_coder.pc_range[0] + center_y = center[:, 1:2, :] * self.bbox_coder.out_size_factor * \ + self.bbox_coder.voxel_size[1] + self.bbox_coder.pc_range[1] + + ref_points = torch.cat([center_x, center_y, height], dim=1).permute(0, 2, 1) + + img_feat = self.extract_img_feat_from_3dpoints(ref_points, img_inputs, fuse=fuse) + return img_feat + + def extract_proposal(self, heatmap): + batch_size = heatmap.shape[0] + padding = self.nms_kernel_size // 2 + local_max = torch.zeros_like(heatmap) + # equals to nms radius = voxel_size * out_size_factor * kenel_size + local_max_inner = F.max_pool2d(heatmap, stride=1, padding=0, + kernel_size=self.nms_kernel_size) + local_max[:, :, padding:(-padding), padding:(-padding)] = \ + local_max_inner + ## for Pedestrian & Traffic_cone in nuScenes + if self.test_cfg["dataset"] == "nuScenes": + local_max[:, 8,] = F.max_pool2d(heatmap[:, 8], kernel_size=1, + stride=1, padding=0) + local_max[:, 9,] = F.max_pool2d(heatmap[:, 9], kernel_size=1, + stride=1, padding=0) + elif self.test_cfg["dataset"] == "Waymo": + # for Pedestrian & Cyclist in Waymo + local_max[:, 1,] = F.max_pool2d(heatmap[:, 1], kernel_size=1, + stride=1, padding=0) + local_max[:, 2,] = F.max_pool2d(heatmap[:, 2], kernel_size=1, + stride=1, padding=0) + heatmap = heatmap * (heatmap == local_max) + heatmap = heatmap.view(batch_size, heatmap.shape[1], -1) + + # top #num_proposals among all classes + top_proposals = heatmap.view(batch_size, -1) + top_proposals = top_proposals.argsort(dim=-1, descending=True) + top_proposals = top_proposals[..., :self.num_proposals] + top_proposals_class = top_proposals // heatmap.shape[-1] + top_proposals_index = top_proposals % heatmap.shape[-1] + top_proposals_index = top_proposals_index.unsqueeze(1) + return top_proposals_class, top_proposals_index + + def forward_single(self, inputs, img_inputs, bev_feat_img=None): + """Forward function for CenterPoint. + Args: + inputs (torch.Tensor): Input feature map with the shape of + [B, 512, 128(H), 128(W)]. (consistent with L748) + Returns: + list[dict]: Output results for tasks. 
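+
+        In short: the LiDAR BEV feature is concatenated with the image BEV
+        feature to predict a dense heatmap, the top ``num_proposals`` peaks
+        are taken as queries, boxes are regressed from the LiDAR query
+        features, and the classification heatmap is refined from the fused
+        sparse features (LiDAR query, instance-level image, and image BEV).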
+ """ + batch_size = inputs.shape[0] + + bev_feat_lidar = self.shared_conv(inputs) + bev_feat_lidar_flatten = bev_feat_lidar.view(batch_size, bev_feat_lidar.shape[1], -1) # [BS, C, H*W] + + bev_pos = self.bev_pos.repeat(batch_size, 1, 1).to(bev_feat_lidar.device) + + # predict dense heatmap + dense_fuse_feat = torch.cat([bev_feat_lidar, bev_feat_img], + dim=1) + dense_fuse_feat = \ + self.dense_heatmap_fuse_convs(dense_fuse_feat)[0] + dense_heatmap = self.heatmap_head(dense_fuse_feat) + heatmap = dense_heatmap.detach().sigmoid() + + # generate proposal + top_proposals_class, top_proposals_index = self.extract_proposal(heatmap) + self.query_labels = top_proposals_class + + # prepare sparse lidar feat of proposal + index = top_proposals_index.expand(-1, bev_feat_lidar_flatten.shape[1], + -1) + query_feat_lidar = bev_feat_lidar_flatten.gather(index=index, dim=-1) + + # add category embedding + one_hot = F.one_hot(top_proposals_class, num_classes=self.num_classes).permute(0, 2, 1) + query_cat_encoding = self.class_encoding(one_hot.float()) + query_feat_lidar += query_cat_encoding + + query_pos_index = top_proposals_index.permute(0, 2, 1) + query_pos_index = query_pos_index.expand(-1, -1, bev_pos.shape[-1]) + query_pos = bev_pos.gather(index=query_pos_index, dim=1) + + # Prediction + res = dict() + for task in ['height', 'center', 'dim', 'rot', 'vel']: + res[task] = \ + self.prediction_heads[0].__getattr__(task)(query_feat_lidar) + res['center'] += query_pos.permute(0, 2, 1) + + # generate sparse fuse feat + query_feat_img = self.extract_instance_img_feat(res, img_inputs) + + bev_feat_img = bev_feat_img.view(batch_size, bev_feat_img.shape[1], -1) + index = top_proposals_index.expand(-1, bev_feat_img.shape[1], -1) + query_feat_img_bev = bev_feat_img.gather(index=index, dim=-1) + + query_feat_fuse = torch.cat([query_feat_lidar, query_feat_img, + query_feat_img_bev], dim=1) + query_feat_fuse = self.fuse_convs(query_feat_fuse) + res['heatmap'] = \ + self.prediction_heads[0].__getattr__('heatmap')(query_feat_fuse) + + heatmap = heatmap.view(batch_size, heatmap.shape[1], -1) + res["query_heatmap_score"] = heatmap.gather( + index=top_proposals_index.expand(-1, self.num_classes, -1), + dim=-1) # [bs, num_classes, num_proposals] + res["dense_heatmap"] = dense_heatmap + + return [res] + + def forward(self, feats): + """Forward pass. + Args: + feats (list[torch.Tensor]): Multi-level features, e.g., + features produced by FPN. + Returns: + tuple(list[dict]): Output results. 
first index by level, second index by layer + """ + return [self.forward_single(feats[1][0], feats[0], feats[2][0])] \ No newline at end of file diff --git a/mmdet3d/models/dense_heads/transfusion_head.py b/mmdet3d/models/dense_heads/transfusion_head.py new file mode 100644 index 00000000..873ff406 --- /dev/null +++ b/mmdet3d/models/dense_heads/transfusion_head.py @@ -0,0 +1,813 @@ +import copy + +import numpy as np +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_conv_layer +from mmcv.runner import force_fp32 +from torch import nn + +from mmdet3d.core import ( + PseudoSampler, + circle_nms, + draw_heatmap_gaussian, + gaussian_radius, + xywhr2xyxyr, +) +from mmdet3d.models.builder import HEADS, build_loss +from mmdet3d.models.utils import FFN, PositionEmbeddingLearned, TransformerDecoderLayer + +from mmdet.core import ( + AssignResult, + build_assigner, + build_bbox_coder, + build_sampler, + multi_apply, +) + +__all__ = ["TransFusionHead"] + + +def clip_sigmoid(x, eps=1e-4): + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y + + +@HEADS.register_module() +class TransFusionHead(nn.Module): + def __init__( + self, + num_proposals=128, + auxiliary=True, + in_channels=128 * 3, + hidden_channel=128, + num_classes=4, + # config for Transformer + num_decoder_layers=3, + num_heads=8, + nms_kernel_size=1, + ffn_channel=256, + dropout=0.1, + bn_momentum=0.1, + activation="relu", + instance_attn=True, + # config for FFN + common_heads=dict(), + num_heatmap_convs=2, + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), + bias="auto", + # loss + loss_cls=dict(type="GaussianFocalLoss", reduction="mean"), + loss_iou=dict( + type="VarifocalLoss", use_sigmoid=True, iou_weighted=True, reduction="mean" + ), + loss_bbox=dict(type="L1Loss", reduction="mean"), + loss_heatmap=dict(type="GaussianFocalLoss", reduction="mean"), + # others + train_cfg=None, + test_cfg=None, + bbox_coder=None, + ): + super(TransFusionHead, self).__init__() + + self.fp16_enabled = False + + self.num_classes = num_classes + self.num_proposals = num_proposals + self.auxiliary = auxiliary + self.in_channels = in_channels + self.num_heads = num_heads + self.num_decoder_layers = num_decoder_layers + self.bn_momentum = bn_momentum + self.nms_kernel_size = nms_kernel_size + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.use_sigmoid_cls = loss_cls.get("use_sigmoid", False) + if not self.use_sigmoid_cls: + self.num_classes += 1 + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_iou = build_loss(loss_iou) + self.loss_heatmap = build_loss(loss_heatmap) + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.sampling = False + + # a shared convolution + self.shared_conv = build_conv_layer( + dict(type="Conv2d"), + in_channels, + hidden_channel, + kernel_size=3, + padding=1, + bias=bias, + ) + + layers = [] + layers.append( + ConvModule( + hidden_channel, + hidden_channel, + kernel_size=3, + padding=1, + bias=bias, + conv_cfg=dict(type="Conv2d"), + norm_cfg=dict(type="BN2d"), + ) + ) + layers.append( + build_conv_layer( + dict(type="Conv2d"), + hidden_channel, + num_classes, + kernel_size=3, + padding=1, + bias=bias, + ) + ) + self.heatmap_head = nn.Sequential(*layers) + self.class_encoding = nn.Conv1d(num_classes, hidden_channel, 1) + + # transformer decoder layers for object query with LiDAR feature + if instance_attn: + self.decoder = nn.ModuleList() + for i in range(self.num_decoder_layers): + self.decoder.append( + 
TransformerDecoderLayer( + hidden_channel, + num_heads, + ffn_channel, + dropout, + activation, + self_posembed=PositionEmbeddingLearned(2, hidden_channel), + cross_posembed=PositionEmbeddingLearned(2, hidden_channel), + ) + ) + else: + self.decoder = None + + # Prediction Head + self.prediction_heads = nn.ModuleList() + for i in range(self.num_decoder_layers): + heads = copy.deepcopy(common_heads) + heads.update(dict(heatmap=(self.num_classes, num_heatmap_convs))) + self.prediction_heads.append( + FFN( + hidden_channel, + heads, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=bias, + ) + ) + + self.init_weights() + self._init_assigner_sampler() + + # Position Embedding for Cross-Attention, which is re-used during training + x_size = self.test_cfg["grid_size"][0] // self.test_cfg["out_size_factor"] + y_size = self.test_cfg["grid_size"][1] // self.test_cfg["out_size_factor"] + self.bev_pos = self.create_2D_grid(x_size, y_size) + + self.img_feat_pos = None + self.img_feat_collapsed_pos = None + + def create_2D_grid(self, x_size, y_size): + meshgrid = [[0, x_size - 1, x_size], [0, y_size - 1, y_size]] + # NOTE: modified + batch_y, batch_x = torch.meshgrid( + *[torch.linspace(it[0], it[1], it[2]) for it in meshgrid] + ) + batch_x = batch_x + 0.5 + batch_y = batch_y + 0.5 + coord_base = torch.cat([batch_x[None], batch_y[None]], dim=0)[None] + coord_base = coord_base.view(1, 2, -1).permute(0, 2, 1) + return coord_base + + def init_weights(self): + # initialize transformer + if self.decoder: + for m in self.decoder.parameters(): + if m.dim() > 1: + nn.init.xavier_uniform_(m) + if hasattr(self, "query"): + nn.init.xavier_normal_(self.query) + self.init_bn_momentum() + + def init_bn_momentum(self): + for m in self.modules(): + if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)): + m.momentum = self.bn_momentum + + def _init_assigner_sampler(self): + """Initialize the target assigner and sampler of the head.""" + if self.train_cfg is None: + return + + if self.sampling: + self.bbox_sampler = build_sampler(self.train_cfg.sampler) + else: + self.bbox_sampler = PseudoSampler() + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = build_assigner(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + build_assigner(res) for res in self.train_cfg.assigner + ] + + def forward_single(self, inputs, img_inputs): + """Forward function for CenterPoint. + Args: + inputs (torch.Tensor): Input feature map with the shape of + [B, 512, 128(H), 128(W)]. (consistent with L748) + Returns: + list[dict]: Output results for tasks. 
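+
+        In short: a shared conv produces the BEV feature, a dense heatmap is
+        predicted and suppressed with a class-wise max-pool (local_max), the
+        top ``num_proposals`` peaks become object queries with a category
+        embedding, and each decoder layer attends from the queries to the
+        flattened BEV feature before the FFN heads regress the boxes.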
+ """ + batch_size = inputs.shape[0] + lidar_feat = self.shared_conv(inputs) + + ################################# + # image to BEV + ################################# + lidar_feat_flatten = lidar_feat.view( + batch_size, lidar_feat.shape[1], -1 + ) # [BS, C, H*W] + bev_pos = self.bev_pos.repeat(batch_size, 1, 1).to(lidar_feat.device) + + ################################# + # image guided query initialization + ################################# + dense_heatmap = self.heatmap_head(lidar_feat) + dense_heatmap_img = None + heatmap = dense_heatmap.detach().sigmoid() + padding = self.nms_kernel_size // 2 + local_max = torch.zeros_like(heatmap) + # equals to nms radius = voxel_size * out_size_factor * kenel_size + local_max_inner = F.max_pool2d( + heatmap, kernel_size=self.nms_kernel_size, stride=1, padding=0 + ) + local_max[:, :, padding:(-padding), padding:(-padding)] = local_max_inner + ## for Pedestrian & Traffic_cone in nuScenes + if self.test_cfg["dataset"] == "nuScenes": + local_max[ + :, + 8, + ] = F.max_pool2d(heatmap[:, 8], kernel_size=1, stride=1, padding=0) + local_max[ + :, + 9, + ] = F.max_pool2d(heatmap[:, 9], kernel_size=1, stride=1, padding=0) + elif self.test_cfg["dataset"] == "Waymo": # for Pedestrian & Cyclist in Waymo + local_max[ + :, + 1, + ] = F.max_pool2d(heatmap[:, 1], kernel_size=1, stride=1, padding=0) + local_max[ + :, + 2, + ] = F.max_pool2d(heatmap[:, 2], kernel_size=1, stride=1, padding=0) + heatmap = heatmap * (heatmap == local_max) + heatmap = heatmap.view(batch_size, heatmap.shape[1], -1) + + # top #num_proposals among all classes + top_proposals = heatmap.view(batch_size, -1).argsort(dim=-1, descending=True)[ + ..., : self.num_proposals + ] + top_proposals_class = top_proposals // heatmap.shape[-1] + top_proposals_index = top_proposals % heatmap.shape[-1] + query_feat = lidar_feat_flatten.gather( + index=top_proposals_index[:, None, :].expand( + -1, lidar_feat_flatten.shape[1], -1 + ), + dim=-1, + ) + self.query_labels = top_proposals_class + + # add category embedding + one_hot = F.one_hot(top_proposals_class, num_classes=self.num_classes).permute( + 0, 2, 1 + ) + query_cat_encoding = self.class_encoding(one_hot.float()) + query_feat += query_cat_encoding + + query_pos = bev_pos.gather( + index=top_proposals_index[:, None, :] + .permute(0, 2, 1) + .expand(-1, -1, bev_pos.shape[-1]), + dim=1, + ) + + ################################# + # transformer decoder layer (LiDAR feature as K,V) + ################################# + ret_dicts = [] + for i in range(self.num_decoder_layers): + prefix = "last_" if (i == self.num_decoder_layers - 1) else f"{i}head_" + + # Transformer Decoder Layer + # :param query: B C Pq :param query_pos: B Pq 3/6 + query_feat = self.decoder[i]( + query_feat, lidar_feat_flatten, query_pos, bev_pos + ) + + # Prediction + res_layer = self.prediction_heads[i](query_feat) + res_layer["center"] = res_layer["center"] + query_pos.permute(0, 2, 1) + first_res_layer = res_layer + ret_dicts.append(res_layer) + + # for next level positional embedding + query_pos = res_layer["center"].detach().clone().permute(0, 2, 1) + + ################################# + # transformer decoder layer (img feature as K,V) + ################################# + ret_dicts[0]["query_heatmap_score"] = heatmap.gather( + index=top_proposals_index[:, None, :].expand(-1, self.num_classes, -1), + dim=-1, + ) # [bs, num_classes, num_proposals] + ret_dicts[0]["dense_heatmap"] = dense_heatmap + + if self.auxiliary is False: + # only return the results of last decoder layer + 
return [ret_dicts[-1]] + + # return all the layer's results for auxiliary superivison + new_res = {} + for key in ret_dicts[0].keys(): + if key not in ["dense_heatmap", "dense_heatmap_old", "query_heatmap_score"]: + new_res[key] = torch.cat( + [ret_dict[key] for ret_dict in ret_dicts], dim=-1 + ) + else: + new_res[key] = ret_dicts[0][key] + return [new_res] + + def forward(self, feats): + """Forward pass. + Args: + feats (list[torch.Tensor]): Multi-level features, e.g., + features produced by FPN. + Returns: + tuple(list[dict]): Output results. first index by level, second index by layer + """ + if isinstance(feats, torch.Tensor): + feats = [feats] + res = multi_apply(self.forward_single, feats, [None]) + assert len(res) == 1, "only support one level features." + return res + + def get_targets(self, gt_bboxes_3d, gt_labels_3d, preds_dict): + """Generate training targets. + Args: + gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes. + gt_labels_3d (torch.Tensor): Labels of boxes. + preds_dicts (tuple of dict): first index by layer (default 1) + Returns: + tuple[torch.Tensor]: Tuple of target including \ + the following results in order. + - torch.Tensor: classification target. [BS, num_proposals] + - torch.Tensor: classification weights (mask) [BS, num_proposals] + - torch.Tensor: regression target. [BS, num_proposals, 8] + - torch.Tensor: regression weights. [BS, num_proposals, 8] + """ + # change preds_dict into list of dict (index by batch_id) + # preds_dict[0]['center'].shape [bs, 3, num_proposal] + list_of_pred_dict = [] + for batch_idx in range(len(gt_bboxes_3d)): + pred_dict = {} + for key in preds_dict[0].keys(): + pred_dict[key] = preds_dict[0][key][batch_idx : batch_idx + 1] + list_of_pred_dict.append(pred_dict) + + assert len(gt_bboxes_3d) == len(list_of_pred_dict) + + res_tuple = multi_apply( + self.get_targets_single, + gt_bboxes_3d, + gt_labels_3d, + list_of_pred_dict, + np.arange(len(gt_labels_3d)), + ) + labels = torch.cat(res_tuple[0], dim=0) + label_weights = torch.cat(res_tuple[1], dim=0) + bbox_targets = torch.cat(res_tuple[2], dim=0) + bbox_weights = torch.cat(res_tuple[3], dim=0) + ious = torch.cat(res_tuple[4], dim=0) + num_pos = np.sum(res_tuple[5]) + matched_ious = np.mean(res_tuple[6]) + heatmap = torch.cat(res_tuple[7], dim=0) + return ( + labels, + label_weights, + bbox_targets, + bbox_weights, + ious, + num_pos, + matched_ious, + heatmap, + ) + + def get_targets_single(self, gt_bboxes_3d, gt_labels_3d, preds_dict, batch_idx): + """Generate training targets for a single sample. + Args: + gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes. + gt_labels_3d (torch.Tensor): Labels of boxes. + preds_dict (dict): dict of prediction result for a single sample + Returns: + tuple[torch.Tensor]: Tuple of target including \ + the following results in order. + - torch.Tensor: classification target. [1, num_proposals] + - torch.Tensor: classification weights (mask) [1, num_proposals] + - torch.Tensor: regression target. [1, num_proposals, 8] + - torch.Tensor: regression weights. [1, num_proposals, 8] + - torch.Tensor: iou target. [1, num_proposals] + - int: number of positive proposals + """ + num_proposals = preds_dict["center"].shape[-1] + + # get pred boxes, carefully ! 
donot change the network outputs + score = copy.deepcopy(preds_dict["heatmap"].detach()) + center = copy.deepcopy(preds_dict["center"].detach()) + height = copy.deepcopy(preds_dict["height"].detach()) + dim = copy.deepcopy(preds_dict["dim"].detach()) + rot = copy.deepcopy(preds_dict["rot"].detach()) + if "vel" in preds_dict.keys(): + vel = copy.deepcopy(preds_dict["vel"].detach()) + else: + vel = None + + boxes_dict = self.bbox_coder.decode( + score, rot, dim, center, height, vel + ) # decode the prediction to real world metric bbox + bboxes_tensor = boxes_dict[0]["bboxes"] + gt_bboxes_tensor = gt_bboxes_3d.tensor.to(score.device) + # each layer should do label assign seperately. + if self.auxiliary: + num_layer = self.num_decoder_layers + else: + num_layer = 1 + + assign_result_list = [] + for idx_layer in range(num_layer): + bboxes_tensor_layer = bboxes_tensor[ + self.num_proposals * idx_layer : self.num_proposals * (idx_layer + 1), : + ] + score_layer = score[ + ..., + self.num_proposals * idx_layer : self.num_proposals * (idx_layer + 1), + ] + + if self.train_cfg.assigner.type == "HungarianAssigner3D": + assign_result = self.bbox_assigner.assign( + bboxes_tensor_layer, + gt_bboxes_tensor, + gt_labels_3d, + score_layer, + self.train_cfg, + ) + elif self.train_cfg.assigner.type == "HeuristicAssigner": + assign_result = self.bbox_assigner.assign( + bboxes_tensor_layer, + gt_bboxes_tensor, + None, + gt_labels_3d, + self.query_labels[batch_idx], + ) + else: + raise NotImplementedError + assign_result_list.append(assign_result) + + # combine assign result of each layer + max_overlaps = [] + for res in assign_result_list: + if res.max_overlaps is not None: + max_overlaps.append(res.max_overlaps) + else: + max_overlaps.append(torch.zeros(self.num_proposals).to(center)) + assign_result_ensemble = AssignResult( + num_gts=sum([res.num_gts for res in assign_result_list]), + gt_inds=torch.cat([res.gt_inds for res in assign_result_list]), + max_overlaps=torch.cat(max_overlaps), + labels=torch.cat([res.labels for res in assign_result_list]), + ) + sampling_result = self.bbox_sampler.sample( + assign_result_ensemble, bboxes_tensor, gt_bboxes_tensor + ) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + assert len(pos_inds) + len(neg_inds) == num_proposals + + # create target for loss computation + bbox_targets = torch.zeros([num_proposals, self.bbox_coder.code_size]).to( + center.device + ) + bbox_weights = torch.zeros([num_proposals, self.bbox_coder.code_size]).to( + center.device + ) + ious = assign_result_ensemble.max_overlaps + ious = torch.clamp(ious, min=0.0, max=1.0) + labels = bboxes_tensor.new_zeros(num_proposals, dtype=torch.long) + label_weights = bboxes_tensor.new_zeros(num_proposals, dtype=torch.long) + + if gt_labels_3d is not None: # default label is -1 + labels += self.num_classes + + # both pos and neg have classification loss, only pos has regression and iou loss + if len(pos_inds) > 0: + pos_bbox_targets = self.bbox_coder.encode(sampling_result.pos_gt_bboxes) + + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + + if gt_labels_3d is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels_3d[sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # # compute dense heatmap targets + device = labels.device + gt_bboxes_3d = torch.cat( + 
[gt_bboxes_3d.gravity_center, gt_bboxes_3d.tensor[:, 3:]], dim=1 + ).to(device) + grid_size = torch.tensor(self.train_cfg["grid_size"]) + pc_range = torch.tensor(self.train_cfg["point_cloud_range"]) + voxel_size = torch.tensor(self.train_cfg["voxel_size"]) + feature_map_size = ( + grid_size[:2] // self.train_cfg["out_size_factor"] + ) # [x_len, y_len] + heatmap = gt_bboxes_3d.new_zeros( + self.num_classes, feature_map_size[1], feature_map_size[0] + ) + for idx in range(len(gt_bboxes_3d)): + width = gt_bboxes_3d[idx][3] + length = gt_bboxes_3d[idx][4] + width = width / voxel_size[0] / self.train_cfg["out_size_factor"] + length = length / voxel_size[1] / self.train_cfg["out_size_factor"] + if width > 0 and length > 0: + radius = gaussian_radius( + (length, width), min_overlap=self.train_cfg["gaussian_overlap"] + ) + radius = max(self.train_cfg["min_radius"], int(radius)) + x, y = gt_bboxes_3d[idx][0], gt_bboxes_3d[idx][1] + + coor_x = ( + (x - pc_range[0]) + / voxel_size[0] + / self.train_cfg["out_size_factor"] + ) + coor_y = ( + (y - pc_range[1]) + / voxel_size[1] + / self.train_cfg["out_size_factor"] + ) + + center = torch.tensor( + [coor_x, coor_y], dtype=torch.float32, device=device + ) + center_int = center.to(torch.int32) + + # original + # draw_heatmap_gaussian(heatmap[gt_labels_3d[idx]], center_int, radius) + # NOTE: fix + draw_heatmap_gaussian(heatmap[gt_labels_3d[idx]], center_int, + radius) + + + mean_iou = ious[pos_inds].sum() / max(len(pos_inds), 1) + return ( + labels[None], + label_weights[None], + bbox_targets[None], + bbox_weights[None], + ious[None], + int(pos_inds.shape[0]), + float(mean_iou), + heatmap[None], + ) + + @force_fp32(apply_to=("preds_dicts")) + def loss(self, gt_bboxes_3d, gt_labels_3d, preds_dicts, **kwargs): + """Loss function for CenterHead. + Args: + gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground + truth gt boxes. + gt_labels_3d (list[torch.Tensor]): Labels of boxes. + preds_dicts (list[list[dict]]): Output of forward function. + Returns: + dict[str:torch.Tensor]: Loss of heatmap and bbox of each task. 
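+
+        The dict contains ``loss_heatmap`` (Gaussian focal loss on the dense
+        heatmap) plus per-layer classification and bbox terms, e.g.
+        ``layer_-1_loss_cls`` and ``layer_-1_loss_bbox`` for the last decoder
+        layer; bbox regression is weighted by ``train_cfg['code_weights']``
+        and the per-layer terms are averaged over the number of positive
+        proposals.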
+ """ + ( + labels, + label_weights, + bbox_targets, + bbox_weights, + ious, + num_pos, + matched_ious, + heatmap, + ) = self.get_targets(gt_bboxes_3d, gt_labels_3d, preds_dicts[0]) + if hasattr(self, "on_the_image_mask"): + label_weights = label_weights * self.on_the_image_mask + bbox_weights = bbox_weights * self.on_the_image_mask[:, :, None] + num_pos = bbox_weights.max(-1).values.sum() + preds_dict = preds_dicts[0][0] + loss_dict = dict() + + # compute heatmap loss + loss_heatmap = self.loss_heatmap( + clip_sigmoid(preds_dict["dense_heatmap"]), + heatmap, + avg_factor=max(heatmap.eq(1).float().sum().item(), 1), + ) + loss_dict["loss_heatmap"] = loss_heatmap + + # compute loss for each layer + for idx_layer in range(self.num_decoder_layers if self.auxiliary else 1): + if idx_layer == self.num_decoder_layers - 1 or ( + idx_layer == 0 and self.auxiliary is False + ): + prefix = "layer_-1" + else: + prefix = f"layer_{idx_layer}" + + layer_labels = labels[ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ].reshape(-1) + layer_label_weights = label_weights[ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ].reshape(-1) + layer_score = preds_dict["heatmap"][ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ] + layer_cls_score = layer_score.permute(0, 2, 1).reshape(-1, self.num_classes) + layer_loss_cls = self.loss_cls( + layer_cls_score, + layer_labels, + layer_label_weights, + avg_factor=max(num_pos, 1), + ) + + layer_center = preds_dict["center"][ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ] + layer_height = preds_dict["height"][ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ] + layer_rot = preds_dict["rot"][ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ] + layer_dim = preds_dict["dim"][ + ..., + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + ] + preds = torch.cat( + [layer_center, layer_height, layer_dim, layer_rot], dim=1 + ).permute( + 0, 2, 1 + ) # [BS, num_proposals, code_size] + if "vel" in preds_dict.keys(): + layer_vel = preds_dict["vel"][ + ..., + idx_layer + * self.num_proposals : (idx_layer + 1) + * self.num_proposals, + ] + preds = torch.cat( + [layer_center, layer_height, layer_dim, layer_rot, layer_vel], dim=1 + ).permute( + 0, 2, 1 + ) # [BS, num_proposals, code_size] + code_weights = self.train_cfg.get("code_weights", None) + layer_bbox_weights = bbox_weights[ + :, + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + :, + ] + layer_reg_weights = layer_bbox_weights * layer_bbox_weights.new_tensor( + code_weights + ) + layer_bbox_targets = bbox_targets[ + :, + idx_layer * self.num_proposals : (idx_layer + 1) * self.num_proposals, + :, + ] + layer_loss_bbox = self.loss_bbox( + preds, layer_bbox_targets, layer_reg_weights, avg_factor=max(num_pos, 1) + ) + + # layer_iou = preds_dict['iou'][..., idx_layer*self.num_proposals:(idx_layer+1)*self.num_proposals].squeeze(1) + # layer_iou_target = ious[..., idx_layer*self.num_proposals:(idx_layer+1)*self.num_proposals] + # layer_loss_iou = self.loss_iou(layer_iou, layer_iou_target, layer_bbox_weights.max(-1).values, avg_factor=max(num_pos, 1)) + + loss_dict[f"{prefix}_loss_cls"] = layer_loss_cls + loss_dict[f"{prefix}_loss_bbox"] = layer_loss_bbox + # loss_dict[f'{prefix}_loss_iou'] = layer_loss_iou + + loss_dict[f"matched_ious"] = layer_loss_cls.new_tensor(matched_ious) + + return 
loss_dict + + def get_bboxes(self, preds_dicts, metas, img=None, rescale=False, for_roi=False): + """Generate bboxes from bbox head predictions. + Args: + preds_dicts (tuple[list[dict]]): Prediction results. + Returns: + list[list[dict]]: Decoded bbox, scores and labels for each layer & each batch + """ + rets = [] + for layer_id, preds_dict in enumerate(preds_dicts): + batch_size = preds_dict[0]["heatmap"].shape[0] + batch_score = preds_dict[0]["heatmap"][..., -self.num_proposals :].sigmoid() + # if self.loss_iou.loss_weight != 0: + # batch_score = torch.sqrt(batch_score * preds_dict[0]['iou'][..., -self.num_proposals:].sigmoid()) + one_hot = F.one_hot( + self.query_labels, num_classes=self.num_classes + ).permute(0, 2, 1) + batch_score = batch_score * preds_dict[0]["query_heatmap_score"] * one_hot + + batch_center = preds_dict[0]["center"][..., -self.num_proposals :] + batch_height = preds_dict[0]["height"][..., -self.num_proposals :] + batch_dim = preds_dict[0]["dim"][..., -self.num_proposals :] + batch_rot = preds_dict[0]["rot"][..., -self.num_proposals :] + batch_vel = None + if "vel" in preds_dict[0]: + batch_vel = preds_dict[0]["vel"][..., -self.num_proposals :] + + temp = self.bbox_coder.decode( + batch_score, + batch_rot, + batch_dim, + batch_center, + batch_height, + batch_vel, + filter=True, + ) + + if self.test_cfg["dataset"] == "nuScenes": + self.tasks = [ + dict( + num_class=8, + class_names=[], + indices=[0, 1, 2, 3, 4, 5, 6, 7], + radius=-1, + ), + dict( + num_class=1, + class_names=["pedestrian"], + indices=[8], + radius=0.175, + ), + dict( + num_class=1, + class_names=["traffic_cone"], + indices=[9], + radius=0.175, + ), + ] + elif self.test_cfg["dataset"] == "Waymo": + self.tasks = [ + dict(num_class=1, class_names=["Car"], indices=[0], radius=0.7), + dict( + num_class=1, class_names=["Pedestrian"], indices=[1], radius=0.7 + ), + dict(num_class=1, class_names=["Cyclist"], indices=[2], radius=0.7), + ] + + ret_layer = [] + for i in range(batch_size): + boxes3d = temp[i]["bboxes"] + scores = temp[i]["scores"] + labels = temp[i]["labels"] + ## adopt circle nms for different categories + assert self.test_cfg["nms_type"] is None + ret = dict(bboxes=boxes3d, scores=scores, labels=labels) + ret_layer.append(ret) + rets.append(ret_layer) + assert len(rets) == 1 + assert len(rets[0]) == 1 + res = [ + [ + metas[0]["box_type_3d"]( + rets[0][0]["bboxes"], box_dim=rets[0][0]["bboxes"].shape[-1] + ), + rets[0][0]["scores"], + rets[0][0]["labels"].int(), + ] + ] + return res diff --git a/mmdet3d/models/detectors/__init__.py b/mmdet3d/models/detectors/__init__.py index 713a4d3b..afc800cb 100644 --- a/mmdet3d/models/detectors/__init__.py +++ b/mmdet3d/models/detectors/__init__.py @@ -3,6 +3,7 @@ from .bevdet import BEVDepth4D, BEVDet, BEVDet4D, BEVDetTRT, BEVStereo4D from .bevdet_occ import BEVStereo4DOCC from .centerpoint import CenterPoint +from .dal import DAL from .dynamic_voxelnet import DynamicVoxelNet from .fcos_mono3d import FCOSMono3D from .groupfree3dnet import GroupFree3DNet diff --git a/mmdet3d/models/detectors/bevdet.py b/mmdet3d/models/detectors/bevdet.py index 5b4feb41..ad1154e2 100644 --- a/mmdet3d/models/detectors/bevdet.py +++ b/mmdet3d/models/detectors/bevdet.py @@ -7,6 +7,7 @@ from mmdet.models import DETECTORS from .. 
import builder from .centerpoint import CenterPoint +from mmdet3d.models.utils.grid_mask import GridMask from mmdet.models.backbones.resnet import ResNet @@ -23,18 +24,28 @@ class BEVDet(CenterPoint): img_bev_encoder_neck (dict): Configuration dict of the BEV encoder neck. """ - def __init__(self, img_view_transformer, img_bev_encoder_backbone, - img_bev_encoder_neck, **kwargs): + def __init__(self, + img_view_transformer, + img_bev_encoder_backbone=None, + img_bev_encoder_neck=None, + use_grid_mask=False, + **kwargs): super(BEVDet, self).__init__(**kwargs) + self.grid_mask = None if not use_grid_mask else \ + GridMask(True, True, rotate=1, offset=False, ratio=0.5, mode=1, + prob=0.7) self.img_view_transformer = builder.build_neck(img_view_transformer) - self.img_bev_encoder_backbone = \ - builder.build_backbone(img_bev_encoder_backbone) - self.img_bev_encoder_neck = builder.build_neck(img_bev_encoder_neck) + if img_bev_encoder_neck and img_bev_encoder_backbone: + self.img_bev_encoder_backbone = \ + builder.build_backbone(img_bev_encoder_backbone) + self.img_bev_encoder_neck = builder.build_neck(img_bev_encoder_neck) def image_encoder(self, img, stereo=False): imgs = img B, N, C, imH, imW = imgs.shape imgs = imgs.view(B * N, C, imH, imW) + if self.grid_mask is not None: + imgs = self.grid_mask(imgs) x = self.img_backbone(imgs) stereo_feat = None if stereo: diff --git a/mmdet3d/models/detectors/dal.py b/mmdet3d/models/detectors/dal.py new file mode 100644 index 00000000..87e4332f --- /dev/null +++ b/mmdet3d/models/detectors/dal.py @@ -0,0 +1,155 @@ +import torch +from .bevdet import BEVDet +from mmdet.models import DETECTORS +from mmdet3d.models.utils import FFN +from mmdet3d.models.utils.spconv_voxelize import SPConvVoxelization + + +@DETECTORS.register_module() +class DAL(BEVDet): + def __init__(self, **kwargs): + super(DAL, self).__init__(**kwargs) + + # image view auxiliary task heads + self.num_cls = self.pts_bbox_head.num_classes + heads = dict(heatmap=(self.num_cls, 2)) + input_feat_dim = kwargs['pts_bbox_head']['hidden_channel'] + self.auxiliary_heads = FFN( + input_feat_dim, + heads, + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), + bias=True) + self.auxiliary_heads.init_weights() + + pts_voxel_cfg = kwargs.get('pts_voxel_layer', None) + if pts_voxel_cfg: + pts_voxel_cfg['num_point_features'] = 5 + self.pts_voxel_layer = SPConvVoxelization(**pts_voxel_cfg) + + def extract_img_feat(self, img, img_metas): + """Extract features of images.""" + img = self.prepare_inputs(img) + x, _ = self.image_encoder(img[0]) + return [x] + img[1:] + + def extract_feat(self, points, img, img_metas): + """Extract features from images and points.""" + img_feats = self.extract_img_feat(img, img_metas) + pts_feats = self.extract_pts_feat(points, img_feats, img_metas) + return (img_feats, pts_feats) + + def forward_img_auxiliary_train(self, + x, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + proposals=None, + **kwargs): + max_instance = 150 + num_pos = 0 + centers_augego = x[0].new_zeros((len(gt_bboxes), max_instance, 3)) + box_targets_all = x[0].new_zeros((len(gt_bboxes), max_instance, 10)) + valid_mask = x[0].new_zeros((len(gt_bboxes), max_instance, 1)) + label = x[0].new_zeros((len(gt_bboxes), max_instance, 1)).to(torch.long) + for sid in range(len(gt_bboxes)): + centers_augego_tmp = gt_bboxes[sid].gravity_center.to(x[0]) + box_targets_tmp = self.pts_bbox_head.bbox_coder.encode(gt_bboxes[sid].tensor) + if gt_bboxes_ignore is not None: + centers_augego_tmp = 
centers_augego_tmp[gt_bboxes_ignore[sid], :] + box_targets_tmp = box_targets_tmp[gt_bboxes_ignore[sid], :] + num_valid_samples = centers_augego_tmp.shape[0] + num_pos += num_valid_samples + valid_mask[sid, :num_valid_samples, :] = 1.0 + centers_augego[sid,:num_valid_samples,:] = centers_augego_tmp + box_targets_all[sid,:num_valid_samples,:] = box_targets_tmp + label_tmp = gt_labels[sid].unsqueeze(-1) + if gt_bboxes_ignore is not None: + label_tmp = label_tmp[gt_bboxes_ignore[sid], :] + label[sid,:num_valid_samples,:] = label_tmp + img_feats = self.pts_bbox_head.extract_img_feat_from_3dpoints( + centers_augego, x, fuse=False) + heatmap = self.auxiliary_heads.heatmap(img_feats) + loss_cls_img = self.pts_bbox_head.loss_cls( + heatmap.permute(0, 2, 1).reshape(-1, self.num_cls), + label.flatten(), + valid_mask.flatten(), + avg_factor=max(num_pos, 1)) + return dict(loss_cls_img=loss_cls_img) + + def forward_train(self, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img_inputs=None, + proposals=None, + gt_bboxes_ignore=None, + **kwargs): + """Forward training function. + + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + + Returns: + dict: Losses of different branches. 
+ """ + img_feats, pts_feats = self.extract_feat( + points, img=img_inputs, img_metas=img_metas) + img_feats_bev = \ + self.img_view_transformer(img_feats + img_inputs[1:7], + depth_from_lidar=kwargs['gt_depth']) + + losses = dict() + losses_pts = \ + self.forward_pts_train([img_feats, pts_feats, img_feats_bev], + gt_bboxes_3d, gt_labels_3d, img_metas, + gt_bboxes_ignore) + losses.update(losses_pts) + losses_img_auxiliary = \ + self.forward_img_auxiliary_train(img_feats,img_metas, + gt_bboxes_3d, gt_labels_3d, + gt_bboxes_ignore, + **kwargs) + losses.update(losses_img_auxiliary) + return losses + + def simple_test(self, + points, + img_metas, + img_inputs=None, + rescale=False, + **kwargs): + """Test function without augmentaiton.""" + img_feats, pts_feats = self.extract_feat( + points, img=img_inputs, img_metas=img_metas) + img_feats_bev = \ + self.img_view_transformer(img_feats + img_inputs[1:7], + depth_from_lidar=kwargs['gt_depth'][0]) + + bbox_list = [dict() for _ in range(len(img_metas))] + bbox_pts = self.simple_test_pts([img_feats, pts_feats, img_feats_bev], + img_metas, rescale=rescale) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + return bbox_list \ No newline at end of file diff --git a/mmdet3d/models/necks/view_transformer.py b/mmdet3d/models/necks/view_transformer.py index d788aa08..47cc39d3 100644 --- a/mmdet3d/models/necks/view_transformer.py +++ b/mmdet3d/models/necks/view_transformer.py @@ -11,6 +11,8 @@ from mmdet.models.backbones.resnet import BasicBlock from ..builder import NECKS +from torch.utils.checkpoint import checkpoint + @NECKS.register_module() class LSSViewTransformer(BaseModule): @@ -47,8 +49,11 @@ def __init__( accelerate=False, sid=False, collapse_z=True, + with_cp=False, + with_depth_from_lidar=False, ): super(LSSViewTransformer, self).__init__() + self.with_cp = with_cp self.grid_config = grid_config self.downsample = downsample self.create_grid_infos(**grid_config) @@ -62,6 +67,28 @@ def __init__( self.accelerate = accelerate self.initial_flag = True self.collapse_z = collapse_z + self.with_depth_from_lidar = with_depth_from_lidar + if self.with_depth_from_lidar: + self.lidar_input_net = nn.Sequential( + nn.Conv2d(1, 8, 1), + nn.BatchNorm2d(8), + nn.ReLU(True), + nn.Conv2d(8, 32, 5, stride=4, padding=2), + nn.BatchNorm2d(32), + nn.ReLU(True), + nn.Conv2d(32, 64, 5, stride=int(2 * self.downsample / 8), + padding=2), + nn.BatchNorm2d(64), + nn.ReLU(True)) + out_channels = self.D + self.out_channels + self.depth_net = nn.Sequential( + nn.Conv2d(in_channels + 64, in_channels, 3, padding=1), + nn.BatchNorm2d(in_channels), + nn.ReLU(True), + nn.Conv2d(in_channels, in_channels, 3, padding=1), + nn.BatchNorm2d(in_channels), + nn.ReLU(True), + nn.Conv2d(in_channels, out_channels, 1)) def create_grid_infos(self, x, y, z, **kwargs): """Generate the grid information including the lower bound, interval, @@ -147,8 +174,9 @@ def get_lidar_coor(self, sensor2ego, ego2global, cam2imgs, post_rots, post_trans combine = sensor2ego[:,:,:3,:3].matmul(torch.inverse(cam2imgs)) points = combine.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1) points += sensor2ego[:,:,:3, 3].view(B, N, 1, 1, 1, 3) - points = bda.view(B, 1, 1, 1, 1, 3, - 3).matmul(points.unsqueeze(-1)).squeeze(-1) + points = bda[:, :3, :3].view(B, 1, 1, 1, 1, 3, 3).matmul( + points.unsqueeze(-1)).squeeze(-1) + points += bda[:, :3, 3].view(B, 1, 1, 1, 1, 3) return points def init_acceleration_v2(self, coor): @@ -290,11 +318,13 @@ def view_transform_core(self, 
input, depth, tran_feat): return bev_feat, depth def view_transform(self, input, depth, tran_feat): + for shape_id in range(3): + assert depth.shape[shape_id+1] == self.frustum.shape[shape_id] if self.accelerate: self.pre_compute(input) return self.view_transform_core(input, depth, tran_feat) - def forward(self, input): + def forward(self, input, depth_from_lidar=None): """Transform image-view feature into bird-eye-view feature. Args: @@ -307,7 +337,19 @@ def forward(self, input): x = input[0] B, N, C, H, W = x.shape x = x.view(B * N, C, H, W) - x = self.depth_net(x) + if self.with_depth_from_lidar: + assert depth_from_lidar is not None + if isinstance(depth_from_lidar, list): + assert len(depth_from_lidar) == 1 + depth_from_lidar = depth_from_lidar[0] + h_img, w_img = depth_from_lidar.shape[2:] + depth_from_lidar = depth_from_lidar.view(B * N, 1, h_img, w_img) + depth_from_lidar = self.lidar_input_net(depth_from_lidar) + x = torch.cat([x, depth_from_lidar], dim=1) + if self.with_cp: + x =checkpoint(self.depth_net, x) + else: + x = self.depth_net(x) depth_digit = x[:, :self.D, ...] tran_feat = x[:, self.D:self.D + self.out_channels, ...] diff --git a/mmdet3d/models/utils/__init__.py b/mmdet3d/models/utils/__init__.py index 92a0499a..63c07660 100644 --- a/mmdet3d/models/utils/__init__.py +++ b/mmdet3d/models/utils/__init__.py @@ -4,8 +4,10 @@ from .gen_keypoints import get_keypoints from .handle_objs import filter_outside_objs, handle_proj_objs from .mlp import MLP +from .transformer import * +from .grid_mask import GridMask __all__ = [ 'clip_sigmoid', 'MLP', 'get_edge_indices', 'filter_outside_objs', - 'handle_proj_objs', 'get_keypoints' + 'handle_proj_objs', 'get_keypoints', 'GridMask' ] diff --git a/mmdet3d/models/utils/grid_mask.py b/mmdet3d/models/utils/grid_mask.py new file mode 100644 index 00000000..7d0844cd --- /dev/null +++ b/mmdet3d/models/utils/grid_mask.py @@ -0,0 +1,127 @@ +import torch +import torch.nn as nn +import numpy as np +from PIL import Image +from mmcv.runner import force_fp32, auto_fp16 + + +class Grid(object): + def __init__(self, use_h, use_w, rotate=1, offset=False, ratio=0.5, mode=0, prob=1.): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + + def __call__(self, img, label): + if np.random.rand() > self.prob: + return img, label + h = img.size(1) + w = img.size(2) + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.l = np.random.randint(1, d) + else: + self.l = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.l, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.l, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) // 2 + w] + + mask = torch.from_numpy(mask).float() + if self.mode == 1: + mask = 1 - mask + + mask = mask.expand_as(img) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h, w) - 0.5)).float() + offset = (1 - mask) * offset + img = img 
* mask + offset + else: + img = img * mask + + return img, label + + +class GridMask(nn.Module): + def __init__(self, use_h, use_w, rotate=1, offset=False, ratio=0.5, mode=0, prob=1.): + super(GridMask, self).__init__() + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + self.fp16_enable = False + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch # + 1.#0.5 + + @auto_fp16() + def forward(self, x): + if np.random.rand() > self.prob or not self.training: + return x + n, c, h, w = x.size() + x = x.reshape(-1, h, w) + hh = int(1.5 * h) + ww = int(1.5 * w) + d = np.random.randint(2, h) + self.l = min(max(int(d * self.ratio + 0.5), 1), d - 1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh // d): + s = d * i + st_h + t = min(s + self.l, hh) + mask[s:t, :] *= 0 + if self.use_w: + for i in range(ww // d): + s = d * i + st_w + t = min(s + self.l, ww) + mask[:, s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) // 2 + w] + + mask = torch.from_numpy(mask).to(x.dtype).cuda() + if self.mode == 1: + mask = 1 - mask + mask = mask.expand_as(x) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h, w) - 0.5)).to(x.dtype).cuda() + x = x * mask + offset * (1 - mask) + else: + x = x * mask + + return x.view(n, c, h, w) \ No newline at end of file diff --git a/mmdet3d/models/utils/spconv_voxelize.py b/mmdet3d/models/utils/spconv_voxelize.py new file mode 100644 index 00000000..d9be7b7d --- /dev/null +++ b/mmdet3d/models/utils/spconv_voxelize.py @@ -0,0 +1,72 @@ +# Copyright (c) 2023 megvii-model. All Rights Reserved. 
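+# SPConvVoxelization below wraps spconv's GPU-side PointToVoxel generator.
+# The generator is rebuilt in train()/eval() because PointToVoxel takes a
+# single max_num_voxels value, while mmdet3d configs pass a (train, test)
+# pair via max_voxels: train() uses max_voxels[0], eval() uses max_voxels[1].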
+ +import numpy as np +from torch import nn +from spconv.pytorch.utils import PointToVoxel # spconv-cu111 2.1.21 +import torch +import torch.nn.functional as F +from torch.nn.modules.utils import _pair + + +class SPConvVoxelization(nn.Module): + def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels, num_point_features, + device=torch.device("cuda")): + super().__init__() + assert len(voxel_size) == 3 + assert len(point_cloud_range) == 6 + self.voxel_size = np.array(voxel_size) + self.point_cloud_range = np.array(point_cloud_range) + self.max_num_points = max_num_points + self.num_point_features = num_point_features + self.device = device + if isinstance(max_voxels, tuple): + self.max_voxels = max_voxels + else: + self.max_voxels = _pair(max_voxels) + self.voxel_generator = PointToVoxel( + vsize_xyz=voxel_size, + coors_range_xyz=point_cloud_range, + max_num_points_per_voxel=max_num_points, + max_num_voxels=self.max_voxels[0], + num_point_features=num_point_features, + device=device, + ) + grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(voxel_size) + self.grid_size = np.round(grid_size).astype(np.int64) + + def train(self, mode: bool = True): + if mode: + self.voxel_generator = PointToVoxel( + vsize_xyz=self.voxel_size.tolist(), + coors_range_xyz=self.point_cloud_range.tolist(), + max_num_points_per_voxel=self.max_num_points, + max_num_voxels=self.max_voxels[0], + num_point_features=self.num_point_features, + device=self.device, + ) + else: + self.voxel_generator = PointToVoxel( + vsize_xyz=self.voxel_size.tolist(), + coors_range_xyz=self.point_cloud_range.tolist(), + max_num_points_per_voxel=self.max_num_points, + max_num_voxels=self.max_voxels[1], + num_point_features=self.num_point_features, + device=self.device, + ) + + return super().train(mode) + + def forward(self, points): + voxel_output = self.voxel_generator(points) + voxels, coordinates, num_points = voxel_output + return torch.clone(voxels), torch.clone(coordinates), torch.clone(num_points) + + def __repr__(self): + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'voxel_size=' + str(self.voxel_size) + tmpstr += ', point_cloud_range=' + str(self.point_cloud_range) + tmpstr += ', max_num_points=' + str(self.max_num_points) + tmpstr += ', max_voxels=' + str(self.max_voxels) + tmpstr += ', num_point_features=' + str(self.num_point_features) + tmpstr += ')' + return tmpstr \ No newline at end of file diff --git a/mmdet3d/models/utils/transformer.py b/mmdet3d/models/utils/transformer.py new file mode 100644 index 00000000..659af1ed --- /dev/null +++ b/mmdet3d/models/utils/transformer.py @@ -0,0 +1,578 @@ +from mmcv.cnn import ConvModule, build_conv_layer, kaiming_init + +import torch +from torch import nn +import torch.nn.functional as F +from torch.nn.parameter import Parameter +from torch.nn import Linear +from torch.nn.init import xavier_uniform_, constant_ + + +__all__ = ["PositionEmbeddingLearned", "TransformerDecoderLayer", "MultiheadAttention", "FFN"] + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. 
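+    Maps query positions (x, y) or (x, y, z) to ``num_pos_feats``-dimensional
+    embeddings with a small Conv1d -> BN1d -> ReLU -> Conv1d head.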
+ """ + + def __init__(self, input_channel, num_pos_feats=288): + super().__init__() + self.position_embedding_head = nn.Sequential( + nn.Conv1d(input_channel, num_pos_feats, kernel_size=1), + nn.BatchNorm1d(num_pos_feats), + nn.ReLU(inplace=True), + nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1)) + + def forward(self, xyz): + xyz = xyz.transpose(1, 2).contiguous() + position_embedding = self.position_embedding_head(xyz) + return position_embedding + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", + self_posembed=None, cross_posembed=None, cross_only=False): + super().__init__() + self.cross_only = cross_only + if not self.cross_only: + self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + + self.activation = _get_activation_fn(activation) + + self.self_posembed = self_posembed + self.cross_posembed = cross_posembed + + def with_pos_embed(self, tensor, pos_embed): + return tensor if pos_embed is None else tensor + pos_embed + + def forward(self, query, key, query_pos, key_pos, attn_mask=None): + """ + :param query: B C Pq + :param key: B C Pk + :param query_pos: B Pq 3/6 + :param key_pos: B Pk 3/6 + :param value_pos: [B Pq 3/6] + :return: + """ + # NxCxP to PxNxC + if self.self_posembed is not None: + query_pos_embed = self.self_posembed(query_pos).permute(2, 0, 1) + else: + query_pos_embed = None + if self.cross_posembed is not None: + key_pos_embed = self.cross_posembed(key_pos).permute(2, 0, 1) + else: + key_pos_embed = None + + query = query.permute(2, 0, 1) + key = key.permute(2, 0, 1) + + if not self.cross_only: + q = k = v = self.with_pos_embed(query, query_pos_embed) + query2 = self.self_attn(q, k, value=v)[0] + query = query + self.dropout1(query2) + query = self.norm1(query) + + query2 = self.multihead_attn(query=self.with_pos_embed(query, query_pos_embed), + key=self.with_pos_embed(key, key_pos_embed), + value=self.with_pos_embed(key, key_pos_embed), attn_mask=attn_mask)[0] + query = query + self.dropout2(query2) + query = self.norm2(query) + + query2 = self.linear2(self.dropout(self.activation(self.linear1(query)))) + query = query + self.dropout3(query2) + query = self.norm3(query) + + # NxCxP to PxNxC + query = query.permute(1, 2, 0) + return query + + +class MultiheadAttention(nn.Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. 
+ dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in key. Default: None. + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + Examples:: + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, + vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = Linear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None): + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: mask that prevents attention to certain positions. This is an additive mask + (i.e. the values will be added to the attention layer). + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. 
+ - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. + - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + if not hasattr(self, '_qkv_same_embed_dim'): + warnings.warn('A new version of MultiheadAttention module has been implemented. \ + Please re-train your model with the new module', + UserWarning) + + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) + + +def multi_head_attention_forward(query, # type: Tensor + key, # type: Tensor + value, # type: Tensor + embed_dim_to_check, # type: int + num_heads, # type: int + in_proj_weight, # type: Tensor + in_proj_bias, # type: Tensor + bias_k, # type: Optional[Tensor] + bias_v, # type: Optional[Tensor] + add_zero_attn, # type: bool + dropout_p, # type: float + out_proj_weight, # type: Tensor + out_proj_bias, # type: Tensor + training=True, # type: bool + key_padding_mask=None, # type: Optional[Tensor] + need_weights=True, # type: bool + attn_mask=None, # type: Optional[Tensor] + use_separate_proj_weight=False, # type: bool + q_proj_weight=None, # type: Optional[Tensor] + k_proj_weight=None, # type: Optional[Tensor] + v_proj_weight=None, # type: Optional[Tensor] + static_k=None, # type: Optional[Tensor] + static_v=None, # type: Optional[Tensor] + ): + # type: (...) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: mask that prevents attention to certain positions. This is an additive mask + (i.e. 
the values will be added to the attention layer). + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in differnt forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. + - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + + qkv_same = torch.equal(query, key) and torch.equal(key, value) + kv_same = torch.equal(key, value) + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + assert list(query.size()) == [tgt_len, bsz, embed_dim] + assert key.size() == value.size() + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if use_separate_proj_weight is not True: + if qkv_same: + # self-attention + q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif kv_same: + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = F.linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = F.linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = F.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = F.linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b 
is not None: + _b = _b[_start:] + v = F.linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = F.linear(query, q_proj_weight_non_opt, in_proj_bias) + k = F.linear(key, k_proj_weight_non_opt, in_proj_bias) + v = F.linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, + torch.zeros((attn_mask.size(0), 1), + dtype=attn_mask.dtype, + device=attn_mask.device)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1), + dtype=key_padding_mask.dtype, + device=key_padding_mask.device)], dim=1) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." + else: + assert bias_k is None + assert bias_v is None + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1), + dtype=attn_mask.dtype, + device=attn_mask.device)], dim=1) + if key_padding_mask is not None: + key_padding_mask = torch.cat( + [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1), + dtype=key_padding_mask.dtype, + device=key_padding_mask.device)], dim=1) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + attn_mask = attn_mask.unsqueeze(0) + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) 
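+        # padded key positions now hold -inf, so the softmax below assigns
+        # them zero attention weight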
+ attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = F.softmax( + attn_output_weights, dim=-1) + attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None + + +class FFN(nn.Module): + def __init__(self, + in_channels, + heads, + head_conv=64, + final_kernel=1, + init_bias=-2.19, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + bias='auto', + prefix='', + **kwargs): + super(FFN, self).__init__() + + self.heads = heads + self.init_bias = init_bias + self.prefix = prefix + for head in self.heads: + classes, num_conv = self.heads[head] + + conv_layers = [] + c_in = in_channels + for i in range(num_conv - 1): + conv_layers.append( + ConvModule( + c_in, + head_conv, + kernel_size=final_kernel, + stride=1, + padding=final_kernel // 2, + bias=bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg)) + c_in = head_conv + + conv_layers.append( + build_conv_layer( + conv_cfg, + head_conv, + classes, + kernel_size=final_kernel, + stride=1, + padding=final_kernel // 2, + bias=True)) + conv_layers = nn.Sequential(*conv_layers) + + self.__setattr__(prefix+head, conv_layers) + + def init_weights(self): + """Initialize weights.""" + for head in self.heads: + if head == 'heatmap': + self.__getattr__(self.prefix+head)[-1].bias.data.fill_(self.init_bias) + else: + for m in self.__getattr__(self.prefix+head).modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + + def forward(self, x): + """Forward function for SepHead. + Args: + x (torch.Tensor): Input feature map with the shape of + [B, 512, 128, 128]. + Returns: + dict[str: torch.Tensor]: contains the following keys: + -reg (torch.Tensor): 2D regression value with the \ + shape of [B, 2, H, W]. + -height (torch.Tensor): Height value with the \ + shape of [B, 1, H, W]. + -dim (torch.Tensor): Size value with the shape \ + of [B, 3, H, W]. + -rot (torch.Tensor): Rotation value with the \ + shape of [B, 1, H, W]. + -vel (torch.Tensor): Velocity value with the \ + shape of [B, 2, H, W]. + -heatmap (torch.Tensor): Heatmap with the shape of \ + [B, N, H, W]. 
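+                Only the heads configured in ``self.heads`` are returned;
+                the DAL auxiliary branch builds this FFN with a ``heatmap``
+                head only.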
+ """ + ret_dict = dict() + for head in self.heads: + ret_dict[head] = self.__getattr__(head)(x) + + return ret_dict \ No newline at end of file diff --git a/resources/nds-fps-dal.png b/resources/nds-fps-dal.png new file mode 100644 index 0000000000000000000000000000000000000000..bc69b8808c135028ec759e51da0f267f2c8b9135 GIT binary patch literal 77085 zcmeFZcTiJl8#j!)h>Cy>Q4nHTks_cXU5JQ)f(S^H5~3n4^b!yPLqDk9RG^xh$q zhzKaXh0r6?386y*Dd#)v>h3OL}^A7Sz8T^|FS7G58?dQ~^}@waO>HtS*t zoW2OdC^yAOH8)#7bx@yFfYi&b{Oxr?8HirpSK@%Fvm!CnK{Q-=J4-J!bpS=3~a{kHm{Pb60bIb(RY5*n7Z6dGvR0${AkN#-+zR956)Zgua7+!ugUe%FHbI1cyyiU z@6MI^VK6Mf#(|?Wi`RbWLjBQP{@in!{n3BO{4sancab^nuG|;reDp|0V)ZL|J#?jP zmcXzoEERi;wQb|c5+qgF2%S3YNL^yHJk|1t`mX+V>bn|aT|`MdOMapSsE@T8r4B(o zh;(GcS!tTo=IWcyo6)k6q5Y zHxn^J&4<@*1hc-_M87kG9rXKip#p4wJ;<4)k5%RVvJgjtq$K!Mn=tq3)%_{F$K40M z+f@6h>1It=o8UINAwu6*{9I`<eB!-|<{N4q> z4}xxfv{0??PC!=K*DMRcbfwhFzV2e0pU$2{iMtBW4FB{diTyvGX!4mm zJ5n^P=o&rMQ(?UT*&z72qqDj2i|2BB$U|}ikdfx;VUm`0he{}SNRV_GZM}El*#6zV zb>I)2^FwoULjb9>{@0!`*X)RWYU?${bQ{4ff3%HaEfP2TnW3^cV$>8(chDJuyNRGU zpA=h-V30;d4`0;MH--w(SIz!%nETXP)H7ndGYu+36@b zJAL!@O(mrS-7~1J9n580npRqzQ7?4oT&$*2C!)KAAg_eFSCmwi;r){>2UstH0?!7 zXA*kO>c^H#F>3rR^+%u5p2fUgD}I;kjSsAZ+4UKO2m-ZBz=%7+Fp^0?-Oa5G`nkeH zZ}vU!Rh;YEj5c67HSs09FO^9xJTh_`XpNP2`iMDl)zFPJI7x5)vBR%|t8t`a0eFuSjvyM{(UYlm2o2zx>@4bRPazXWbHhS#b$hfKTfK)VKL;QwPz|r>Qtt{$Q z=)fD~v*luXg^zF1>(gWnhBLVipR+dRqz#Gut*rl$TSUrIhTHk5_oouh&W4j2ndJm8 z9S7{{lW($XdULUCvbuFTEU~Phq_^Mxv$n5a`7irC&G+}+|M9wmVSlRqhv-AUhw#VG z!JTbe()Qi|-00O$pk{vDy=^l;6m{|&=n+5e-d@-b2RX3(54HZ&!k$0+-!}e`*k4Zb zKW%)s-~Soa|9s=mX!1Yb`2QDhqi*a0@?t}xCEVWMQ3>wcL%4CX>vL~m^aYQ%NAm`f za(3F7DYdtjM$0?p+))kb6(BSC+l^9!oj3Ffps?Z|m8x<`dZxOY03JhIJ7R_2vP})) zOYL>iDSfCqInR0ad#wLY?PP9_LE$~4R#_QFrMlU@_jP(bG+c@G4jCg9Hd(|-kx5?M z+-CMx5%Xfp(QLUx-IFDff`Xc7(1Z()G4Fx2weMEbJA#2X?blS& zdRb#&+?UteED^Kj-Cil5{N$(F?tA7XG^N(}5W3keK6zyCuvW4Ap&wc!Rv6kPQrig` zxwhD9n8;xtxIvJd%fec#^w;V=`-l}bwoxtDBf<$eEpyDgYg#S*s5o`x>KQ-_m^554 z;D2L7{>^)>Rmn3E=j%D=>ohpo+SwaslzB5}nqe1MJEzgM23h~wJ9?aVZKWgsj;|GQ zd0;-vWk3T%tfjgpI1B^Abo0+{L`el1Bk^x%=#0GV-cEO8a)Gi`FOFQB%--WZdn8m- z+U}QDu?J8zq4olWtGk8y8x(g^-}A^kecK8h&fnNjDG9<($}@w^%q~EZO-fK+7-p+uZf%Sgai9cZJaSIr za!0XwlG}dN?KWTbGy(5^p8TaKmL~W=nO%`tC*VN+BW1;J#?fMttygpa$)a4WX%;*)gb$W?*sjS z!-v=BQr#BI$}+YPeJhIctIjs1j2>|W?wi*DJ^u-ei=m)w*w=RDX+1Xgwl_}f=mv?a zIB2ITFETxRv{mS8O~tzo1<8I#)|@K;>X=wjaKO=~r%Z{3AMpHjCCS&baEBN2MelT2?@} z6X;`A`noQ3yEM+C6>JwYkL^6!0ZWldwcvtrYAJt2kL^Z!Pw>-e5gTelFL^Ps=mjtO zblNe%EEvH^vtZ_>A2`7X^4BQDL$l*q-H4s!Yzdoo&&i7N7T$FhiK1`by5Ruh zdVce&lm~fCM&bi8`pxA2EBx~h8J9aG)JF3RbMD}>{x*YY&c#HJzkTAky#ep2r zP0c?GT$dk5rSHWvBf0EhZ^^XOSm(+VCjym@;xx0P^#6=ZfL;NWjg<{2HnNP7PL){fUDg=-M& z)L#Y_B??CblxhbfB`m|$3&Ykp45v(+_%o4*3ASqj65if{RKA_JZvokP`;9Jsr8Tj< zk#_uWo;Ho3b`M0$nE6}y1xH2K*an0e8pqjF+~Yp?Zj`o^T8*A)iSw9CV3%Ew$;xe>+vq zXp7_6(upHW<@bAnI@FV2EB#Z`4XyAw%-YmO&U+302tZkC(^0N&oV#9sf*bLr1TXpqEsgD&_ zHBz7vOYnC|%-%pMVZ@lxA@lAlYhTG%JA_c+y2sc0d~j3kwn8BgUFV33cZhr?z&(jSCP6k|R6!b?6s)sYYkSD(lCF=pN`7!SmgMrPV|(}Mia2(Jl!AUwK1xkbRu2du&a#r85QQ(> zX1TyE?N$k?h7|H|TTWjfv-KPboh+xumArcuL=IGke8j3p)hB415t^CH?!xFZz~x6% z2)iNo8-@c>(Hnipzzz9Eow#MJOF?|?>1L{lowj!0el(##aAO6vhF6?b&Ss)a+O!WT zvao&{PI+HyI7+szcoiO=zH(GD!}gb9@yc>9*Td5?G&%RwSxC9Wo@i@(oq5t;qw)M^ zQQl)~e%zbiN<~!I#O$)tH0ncIZ~|5yKF0GSYNIn^YZhbJN)(~2P{a$B6nH4}q2u_2u?l44kvm5I>Rl&Bk8WK%ezH&~*1&k$fTII!-6+o);R z)>xbV;?alH`=wB$KC=Q)+)(D~Wi-R9jcVb~A3BpF>7bAXfookBuMx1ozZRJ?s<|lB zG8hnJldvV<{Q(<7qMf_Ebk#Z^uN?xIoyWf*_N9wr4Kg|xg!daFav1MU1U4Rd=lGc zU&N_>5jEaH8*!7N-y0hL83y^keMB?+xB0wUaHzVgC}NT~QnO}*zFVHzwYXcjI5?8n 
z&)Qf$ilIOP>Zi=cSIlte)uAS*HFI6n4P0)KoKHPNhLG&(V8Ygk-(SbUt{`CHH#09@ zHA2I3B1KO8_L40`3(lUKbH9zT;u4~72VeKiP``9Sz zB!i|Juj$Y61i zWSU(u*?s@({HGGr@5L6!N|_N zUf(Vk40zp;ZB{_Y8|A58h=0l0gPV2hq!bbzIT*`{n*U?oEwnD{&O|45O`vE6{tyK| z%!IZ=s!+18I&c7#P}M~_`za=X)*KWBdYW{1n{yTE_|Y!N_nj`r#9>pv9u*E5j#+n= zc_--#Gwf!tN$cU#>nXn=|3@W1A%7lDCE!el$M(+f&Xu*{>o&nK%9SzH6XFrl%6>f5 zJ>@GvQq?@^9D;zPf6+iZh>m^9D3a;~d=5V- zrRDNEu=IRu)k>PEH2+;%>8;`+xXhU0zXK`t%`pqb$~;kIVA4xWH(2ID~-*$m#}GeXH!}tI}^$ z3IHz+FH;n`RLY;n^2KgJ=97lRmbanhK~DNGD0ou*Iz-K4w9af`%j2m$xwEifzj*s0 z#!$!ce^wJuZ`c3q*zDa7q7KImBaw?$H6HhuiY}NYW^PU44uB1$bQbQ(Cj#DJsZuqZ zyJ&f0*m~$e7|)Of@A2^(9k0(dwE-EEepVxXUbsjXmB9muQfTR%eM~a!%Sp}o`HxXq z&~$N5K30BT7S_ube*o4~PQj|ONbs^rUKn85u_+zk-nwXS&%-8ooLkHD0N#EfSd^h7 zHmBCd1Kitlouugs?RBV;+Vb`X*rog<4@9#rcW8N*67pQ!jKB@knkLnbL8@!^1b?yD zczKBDC@Cj@$#N;NN1@1Q{Jtni7i-s?k&qGCkEX>5vM>x)#H+6UV zn~zGwsCmUR=L$8Y!^~D{S86Fshdir4$6RrG?-TJe#h^%P;Os>^SbCIQD~IRZwJbH# z)lK8cXxq?so~grquuxB=6`-N_Fwd9$BIk`=0FHh?PVdW&#y~D>N6p^fKqjumY&3$N z%m?kh;bD4zsa*PgC~!JoA55)a;Yntq?nbA1dLQeu;a}xJ0v1k#u>P3_A>cq(<(oQ# z?eFs>b9h5`1KE~C>$qU%b*+Vj5e+BBFXgm@1s4}?95pJ7V!f;DJ02a!?{W9pnZ@Q} zm?|8JU>kxDE(#!@dIp_jam;(sSyv`?xogoYurEimYr-Azq9gFS55#$t=|u$h^+Laz z<69ku->x1gR9HwzPUCxBw}&$%DBVe#CBCHp@S>6s(#p)I5ulQoA3q=_;|VrFJZ<#6 zG;w0@$(ce2{4xHbbpa!eLHI>X_j`2*PH{EYT(q}Fokk@@xBu*{5650QU5llV&zH*Yeme!h?%YK3i8AyY5GkoqY zWG!5jNjx~J2NH@c-l9_^{X?SlfzHOKjv@qx~wNJl>zC`l{7(#rnfmYxbTAV z&Eqi+q}+a!YKLf6UPRaZ`8IW`Gc$K(sM;M=TextCbf%|Mz!|a54 z+TWIFr7O`3Os;;BWGz(cr!HUngzz$iRTcqvGG7iw<#yK7I22IQ9or|1{4YRM zSIawu4)P)I8Zq=EJ1MN(IVdrID@OEwi@n3?4AFf(E0F`b7c;uPXvAMrH$9y3W_Q?m zdA_>|wd6BC#252-1z%4%j?a(%cwLhC;B6zhU#9qiDIdI9>+&XD0Ymi8Zt8x8xIjS5 z&ZK8SyI-b$K?P`X4y{Bg@x)~`TrZZ}0R;V)<{6KlAnH+Oz#<;C(XzK`ww%jcbsVuA zzVHP!2ArfyDb2yQ*hRrk{yP*k{}XRJa}tp7ncS!AWV9aV{&bY)vgTq}5kvlt$jXl# zxcLqw*TIeDMFjLriFz zFhLR1Yk0A@vAG%Z0?1j>*0p(UTW{R=|<_fH27MqxB! 
zmA$tD-yaKve~4XCDTHZ`4(*`20M_JAEdsW z$58(!`rR|PzxNV6`?wi`}zZ!?Y+@D!KZr=B$6tDe8RAZZoRYI5fyBHS?{*O?!co5fe5iL1Dx<02J5 zeq6jq+L`h;iDDzfoB96?(nMco0yrN9_nC6dTJBI4 zjdtO%CL@WOfnUMbR-NlUIks|v-LpH<8`hQik$~Q3Cqbozck*AFjPEE?GI0<+CPUew zu7EIJ0fonantt6zyi-nA8V!;HgE)e>zC~nLgN)CXBYD1O*zk{}?j`t|Hi*E~N;vzd z{6zdPoNC-Tg3%9ix_~K9^o$;f_iU?j9)2R#z=%Ih*nSi*dW3kvh23_4ib4T3a08b6 zr(a352fzYm>JvsBM)3B1sZp3RkdjxOeV@5J68&L6_$a)Dt|Y&<=f|yfe}ZbGkOk1` zioRP?&iNqg0I!iDV#Zzh){@44Df5j7b_?0c1xvtmu~k7sPOWIpFf#H#MIB54fXM%* z759anmX>!)fRA&&T4y1`z?$Eu>Hca_h`v!3S>a;3=jkM%dS(vX=w;bUHKippO1CDB zXZm*mfuK##sjC-Sj-m}uUXSAm+Owc*?GksA2v_Yoj^1$`wNWlyJW0r2WyNOn6d?09v+5 zOSG%H{KC^{_6KNbhLS04PGAVYqWyfA83i7MbE@>7i9uziZhqL`(rk>2Q$7+4lr#8G z&3qBzD6V`Hup$`FISf^NqHkeP-~}uaEg5jih?x~OOM85!x`m2%Uy65ll)^VxW*mgv`sV%-?Qh4F)uP#O1&IGhL{lFjX%7LuQq zD_K|!WA-9E*8NqZ8~0gQJI*j3%n?HKSMfn^9P~i+V}65`&!XP=z~V2A35$57JIxBH zZ}sO-UHqF`^1t-uuL0`4mi7$fh7%ix)6%L)YLfT!z-Or12{JmbU%$2@=-Rk-Dw1cr zQkBuipSfLDuSXG?CA1ZmZ~v&$f4{u&a?dL-Ngg_gzA^EUlr8L5ME3CGpWjVVxk5b< zEY6fd7w+TaA4>Z^qhu}zkXoX$i8T^N%>}FYkcS(Vm97~K z=Fu5&v6N#!b;@*YGp#+Yd|sr$BAR#E^@v-cki2}g%(snoIj*^DM;apA&;_Q(cBh&i z-ol*5!PTmcG+Y^*pd2rOCZ46Ly~h7?g+&Qg8_9AoaE9rvDCxb&`%dc|rTY~*)iw0r zHi;JI##unUNq8I4L$7osPto((X@Lz8ZhS<~7)Zu<7ZXW~ey7$KX7-TxRkbRdN3UI6 zhI66oGj1;%=R>`8d0c-2m|r|)eQyJb_(b{(TkPo|t5ZL})0_?hJPY^I;ZI&MvZXuD zP&;F;D$gW(TKuT8ma7{U;8*nS13X-K-|`Z15Sd8)G5!{vni~$V!4WW>Cg+X@f-h2& z@2;`2F7uOAynw;fDJq0k z9A?{@r1dRd`8s9S4JG^Ck91jHFkUiq1{FxyU1MxCgMin4$}3*t z=c!|Q7UKa97^xLKyR_lMQDv}vxzou zia(xU3R9rsU>I{NLR;)TUEOy`{WDE{H#XqQjhqeWXo=WRv8r#30FI-rxm@(X22Q4< zJ_;?oE6)oLI%mN9HMrhF6Af%su#%aT&Y;c^`ZBJIofnfW#(?dplnk&M4iy+z04!=cfPx6YwCThuW&_(S~C?XAk9 zmW{eAR{*;-ejv@Vdw&L_;*%pid13GH`r@tFg(+P6&}jymO{~}gJ}!n45JlbRRmg{* z`ln0ymB@xiKE>x`)hpe9^h?!&fAP~x`QVEi+{poh}K5Ppw|9 z9Pxj_Y*>yFFAQg!7yay!y10qaUqHMw0OF{>vzYSl^KE|p27wS?J$t<23ZUcL-+E-g z8l8!&zz7oPs%Wt-cIR%;j3G_g5R<9oO%J*)vL%i7wBXJ&x0Pp>$=VUE{o0D^d|vQ~ z-W+}PdwHJYvDTZDSpTOrHPEjb%c4@^RW#D?cmLRVp1qe-c2d|DyG z$Q?IaEtwul-uxuw&=tatetqz78y@}(CP*;H7~7;oJY}(38$}r=o@(#{7IUnd42re@ zg~0q*<8j$K>8i&tvG@+B^5K)qWoa%JPsd)m$&bvGJoVq|1(x90GR?QCX9)0X+b;`m zx9#oK2JtSJfgdHo>0Z-39l&3j_1*ZqXH#DRhNDeWdiaASV++$}9A}s87bV-uJ~vY* z>_%BAXQu{SG%Q$3M&0pC@Q}T>AB?5$Gx_+iZn{0G@U_5Ju5zxjN44@Lb0PbMRJkpm zFPwDBMm=1*eZ+G=xw}?VPflj?4tD{m8(l11Lwt#?eU#gz9aBKse6i58Lj?r@Z$_p1 zN;(h8j0bD<$8K8&A?>8nDAqH5YW#jDTxxSVHf~J<-mu`p#G6JSh(28-&QcdE+begh z8^5tUQ8k+uT91yEIE{-AWV-Fq%%98Ubqqw}I+LAny4zQ_ z*jx#~DI_8FnKBlBd{TCCLA!`{K1q5yeh8A2ee4AzW)*D;eYx!vR;Sxbq7J8&7MfS9 z0;7>YC~ai}_SwTgk_w3Rhi>l}u37Kn-HAc@$DXAZ> z9r`U4gYNZ4`y8$ppPA`iPHdI5xuXpOxXB|U4Z%3;SDAj5YWu7>#d_-AM@V|KM7Ovk z_a%vEfuE2))U`GBRA=a1sK>$K!4`EjOO}!8?<0T{dsh@)Ij1kWZ9Rmw-q8blo5J|7 znL}L7zc34X_Uni7$=*-Yt3gQQ)NY0GWB`ih(3a04r|WBKfMUqjQ9v%0>lLgHv^6?D zCMkD~wV`zU;pp>4fkEV>CVQ6oZXGAzPE!+Z*u3PuUPGeZ!1*U8oAJ{SM}`=fU>;7S zdoF+s-7quX4KgT;wIEBa?jJ60rJwp-m=yN;l!BY?kQRo}6l&(h0@wl4hU4>-PO=}C zh)-WA2zsWvUdw_!DL;~tk(uX2J{wmji1D#ZW8b-{VO%VBnNXE<=s7=rKuge#)NceR zfH_)s*X@-iZs=_@_`N32Z_gS-IZZWghn=A|C(O?~ntKe`SP`^f(+oP1k$++LtFPt3 zES4#6O6f88fXJ10bMGtI`o)H>?S`VypX?0>pSZOn>F;TgWGja^m&$rcY?tJ28wn> z{qGs(EnG?G)YL&?+=D8jyrRGXY!6vz`B_-OK^w9~9l56Vf>l+`60xOc=|uIrE$yDt z5eL^ym@ew9vSf-z6`XB<$NB`a+j4#wyY9tiQ|gd$g1Gw7^SnrbCO6U{BGqFLrvnQ9 zcAAV!>a%1Q^3ZX04f1B?OQS0W+v^m1S|urn&L&POqTW{>IhDE`Aq1(CC5 zv9LCiaP7A%hDUUQi2g{&yrg87-lZ>oeOKElPb0SM$8H*NELOwd35jc)9q$r|vQ;

Bhvh{JU2~Lvpt$WT|j$HJ*7m-OajFZqz++ykO+GQTOi1PIb z!##LtE>-n6I0tb}#4ukbeS;WE=azE(7_~n|p$_!G(tmU66+y?KiBmOGD-kTpA{n&A zA5lkLX_fGH@|k?AMui__%6_1w)p5EJj)M$-xEqn5@qe_vXIN9&`aY~<8%4y1D0P$p z1VmJtfPjjMbm>AUD$+ue5&{VjE2s#llt@>QPUs=D5CsA0Ergy(F9AXe38cL{j%R*n z&N=^&Z$5BwUD-+YUTZyTJ>|adXJ-kjb_HO9BS6|=2T#*?UCaxp$9WsCnc<1!Sc3uU zVMn`}sV1M+N0(zS2BQzX*yybnQNYW!6vnga?yR1kHul0-9t;;g)qqxG)+hGJ>2Aie zHg2xIKz;Y1H7*w>7H@v!-_nrzo|;!kbW-mPlUAr4LP(60mKpWei{^*@zo`|?J&r$k zJEkEuy~{G}#^&n)qyvgU!3O(EMUoz!`|~3_$O!B*qvs@cfOM#7e)Bw_xsQK&LCch`mCNg>P}_Cwy_)gh#~;8^mj1OT z@>qW(O*lLJO7m%sWK_9*V%_{p+I!8LYQ+y9_tw(Xr_18lX6d~Dv>Njld_wYEj~+Xwl&B=}ZTOquv%HBPutiOnD{9%ZWcWMs7Ml^I&T!)O6n z+_zUa$Vokxuc+8h8+}8lj$y}KKknMG;;*_Wr$lf2L6ZK8h#?531}QQl4(`cU{G3S( zg}&_M1z^^HtBnFDRBn5_fyUNUOzDuveX-j&6Y2Co77+3hvJ8_7+;`>3U`9~JS}gOQ z-Y+$OBY5D@wykgRtERIlIHJpywm3~`OqbMVU>c{T^fp>deu zpI?=S#-)k@p+uqvyaM9+G4%H^&WKlyA&@qZr0=5M@O;5ZPgY$RuQ3SE9bR^CUS3KZ z{)(yD5=|`juj1$zOO%-(gZ;UMJAR>$leq&^VZ8K5wuY{Tt6HtyrXgSn46)XX zn5~+P{%Ql1P1W@_q>-t-!&Qpnm>LOH)WoQ0HSS+g`V?;X^2-HgO_@|~N<(41wq@>O zX!8}d($6^BMSyJEs!T3Fz&0FpaC_?Q{X44JTIzd!uIzO0sjdTlXBNsGs}No2ie|&mbITlV4ED1LO`B5`JqoO{}7D zYi1_N+Y-pzK&HMQA@}acBY%0aOV7}-pI2q{jEKrCO(zZt~lac8-bPD~v+Z+1nlA8L}ysgOCu z`6{=VvcTLrPyP&k&RNt2lX*CfDmK}HaY9}0YwUfMzE7=)NJQRC ziaz>N)5n4JSXeNwSE7!0X3o5XA6-cy^}<8*CuXzLSJF_ro`4op0pz;>Kj2Ja;!jZ=R&;AEsN}z zIF}xK%Q|Tq{02_-#tvh=(-0c#C~x(E#qO3Er%ezKUe*D@LQ@@fEW`}&=iD5gB()Mh zZJJf=iNYXYF-;hs@2Qsz@}p8VaLZv`()_81>L?m0!^x5FPy^Qe{HU9ii@GT?S42t~ z^w;%4?>4)Zjtov9I!}45dY@#wFSFtN&hEZCB$j9XwXy+Mo#i7bD{`}eS+_pF2b8 zJ|H{uF&=-5P7}7vcz5d1P)dq^fSp6RgCCqe0r8f*e8>8n+$$x|V`ig%sRPGg+;=bF>vBKG^rkiY!H3VH{H(LLENE<>t?rcJ1qUn|{vULBkQ z^ousZEN_{zmmQZ7! zrG%Uys@kR0V&36HvH~mu#_v6>P>&sUkFVg5{dTwO)AaVaj??hACpH1!)UZwU9IuYK zMLVP39dfckA4B`AInysV)Qq@g?2@IU@^cOE?0`=5=+E6N^88+c6s>R9O{0f~j|~r8 z*)xo0@_1eWk98ebhJR!D!SW?Bw2*|17)h6KibRXwpJoj#xZlZ-nc+{{fEeZXUwJiv zYOv!E7;szzkAr`&u%8pPAu6aGkPL+H*%vdy-0pQ9(pyjU!bs&etYi*@T2#RDOQ$i%eLn*7FrbG zIrdRrz@UGHJ&CTS*MD81#P-eK0VCffmVuus7h30B95ZFL&0ScP<)2+hJit-=WT72` z88ID`Ea+}Bw6>{c%5IIoAj=5VagtwP)!s~48l{|=Sm%{?NUXE3LS8B#ns1(&bs3o5 zQHElXl|Cgz&vfKTAKGI{($gMekjA&16W_U3Ytb27XY)besF$&K_n@Vb1oea2%;CMuB6V`~6tIfw zh?>O5M~rh+1ITfb{hWx;`EErEb+{ku+EM{L%XzZ(y+;lN$KvW7Y%sn%qcat6K3(_*UWgk zaYtWD_lA*?>WE>5(aZDyESdbD<(f+|(=JBq>(HFtS&0@lXf=JIisioX`nBKkbP6-z zaoEsEOOy?&fER?OJi4DdIe;9l9t|;vtGGz}b-+A3G(O#y#!6O2FZIt9^xMRtt>IDe zQ8pG;9E(ZK)Qm+)LaS}fP1R=EAoJAgB1EjUyMjU-+9&)_@|)lYeiH5b6{&T!-@2Mp zVJCjrP#sd7?9`m+dXOW<6QStiQ>eLwwf2UNe#BuMiuh{0O*y$WanQ7gL)TI~s@Ka>Oa&&u+v)7|jE&r`BdLucT=ind9~2EgG@Uq6E(6qe{xI=eNGU_bf09us}BWSYK|Wt zzm5KHodCT4zc<-$dh~sr@ztX2frny|CpQeUNU>vt-p2~i#kp%Jn>urSFjba@DUI;_42ASGT(e5KX6fGFfBXd1%VfK0P?5G zTB+Y!R4ad_IqK0YYT&7L8QPz0Ldhr)vTZ&TC0P&lZHmd%6u`KIB_QFmBYhuGr3J(R<+$hP0*r1fS}DOTg9(5N z;@b>gXdAA$lAhM-m5w;cE9s)D!QbCd)$Zo0z@yIheVHmn@LuDR*Rrfro53Hd7Pi(+>B(qE_wk@xA!D0WIBK8M{FVSEWK(nVnyi1l z8r61981$*jw4gb4)Y`06_Emf8GaI(GEMfvdV?&ktMMD+GjO?)MZa%?{3rV}GPX^o2}q5Yyf~=~KkvO9g^UOoGsZX*|%P zZv5&oMR-W)?F(Aa*J!Me#xxAXZ{XZ8HMF!0lsBS#u;+$`&mM+ zR~c=~Sb7whdGmA^h1s<)KZUY6e5iM$8!>V-VYow*(+E7@f7wIU`EZV*dbG|PdVj)c zg@u`YvoDn;#&LZgrezMQHsRc9=YVE2Kg!?P2$`~agbd*C!mrj%y8sNPMAsaY(9WJ54oF5yB zm2EaMk0e|9IKJNfp927gBm_;r(^73dy;5{$DXDJ^M-2tS5&qJL5&B8ZF5ky*54yQ;vY#t9N|b89Y4blZa-&g}g@2e%=gLz4@#LW0sFUo{66SY~P?c zFuY5`LGNtqmZ@1S(8-_#f03=RXSfU;c58E~pkh8(PuCp~vC-^%eiP)MvjG)+?OX1< zcGu9hPG`AZ*vUw&!0VKoo}qXCvUZlcVIkx(OXh57A>(}Sru^QVOj^+V6-TRY+ERgs z#R=zQ4}t+3y_mUk%R(O4FH;#9no4iZVjCOOu@=3d>1I4gOT6xp=|IeJ!6Z z#KIfNoqSt>;cU3GCVIj*V$$NziRbq^9hXLo9j6+FT6H+UETiE?oI7Pw)a&bf{r5)& zEvV;X&-Q-hY^O?ECv7 zK`{=iNtEs9D-PlKc5+*83XkF+Tb``>0o)3ElygH%&ia9{x4W 
z+e=EAM$X51I8f(tNq@S6@Qn=1R-L*nmI>(}njp2ss5@ZaFZz}z<;Z2iG43%hrP1H_by{m$Fc_?hhD5svX|WZ;Hzt zmkyAirhRt9_SFu|e-70S17Ah@QzqBWXRrc2&*cnLvfi)Y?rt?(x__XMGL`~84{2;Z zO`}Ti=*ULef<8{R?dPCh{oFqX;_KxE*Lz~Br_(PF2`b)scFECfB2KHfW)f<-lD^P0 zo++|X?}WP!Xk(`RQd;_s9gjM>sU?UilcHsQ^5TS*OV+s-AS;{v$`{}OJSG^;e^TVZQ>H;f1Y3A4Nw}?a?XB&H+Bx?y@Go9T({OI#k*y|`S{I~OU z&GCiz6PmHK(FZROH8fR2vXWiW(*bFty4S4iRJFRn{?ZrMZy#-U#3keltgaBTqN;>okYF^F)y5R9&^(=A-9vy_bV6Ux zQ+|4=a5Fkppi%^+Z3h`}gw3Nd)Ma(=xlCuw+{6T*uV?kj&g|AIo)gh)C!pPJP}_pb z9LR5dEAKXktue&D4EtA+)b=tgR#Zd&kt(Pm-+#bS9{3JYzq)trXJ<^{K-E2bxn0h_ zWZCP)^KeEeT9L<&$7);$F1Kx*G>a`15)TWG+3MGkg%mv2I$(0_={V`U zOPeAfkeTCoODI!9oe3&)3Eq6M88>XIbuwd#NRCY?H?a{22#P)=H(wOL+4}@{N2s~t zT|hu`!Cs3sdpNJzXeTzD)WD`tx_tVi@{b}Y}tYCzt`i%D~6*ArGKsz9t_ z<^(M#%t54B$o&?@&Xu%)MX)Np?R^99-EmS6(0nC2jAZ#W?=*Xi3jI($rk43&e4NCn ztT49Fp>Vv05U)6heS!X0{65e^FZYjeoc7lvJOS?qa7)qYw}s3?b8aDt8Cp*5-4&*| z=mRQz-4w3_w<0gwvFi_uly}NnmRpM?+AZSV$bHHU?UyiXMwKzIR3%csvZ@Eg3_YrU zj34wo5c1^ga89h4a&IP$se4MHLYjI0IC%Xr2>=GQvG)2P$uCYjEnmAjZfQuTU$i5A zKX7hVsY3IOBy-X>B9dC*>i9ZzaL#mWJ^NtCS;ilVKcFi7`+#H8;Z9IK7<-MbMFUUF zhWhPOsu_fyN;;az*0a0RK)6E5Q_&Y2^$rZF7P9^%P0lMClnpErN`5D$NFQU54JG0`fl&Um4L|!@ zWZLZaqg0wm>mM6FzzP7;aZBeQLd!*xXB*%+o9K}#0HQdlUhYd2?npRxY2DHtL)Gz~ zUvXgW+3N!B*stl@bxm%0z%(kJh5m*({eO$$TNwgdx*1QqmZhW0O0ziTuHRhgp6Kp7 z&lfPu(#QNs?~EQ$lIOMZ$`$a@8jwNLuQhxL$L)vCH8_ov21jt3`qQKr!c;`XEA$>j za-o}M6@Fih{Y_B+dV(xJB|@s=hTZxKPBAcuPs6+Ujrvp)vAp z7_6`?s3gz&V?n4MA@V3oi}oUsRs@Rup{~Ui#427HsQFfEn{P9>ukQ97*0x?mOeVNr zy^or8n?Z1n+mZ}be|^QHrtPdDMnvrr2uxbgGf_M7YPX{JMG-IoTV*uSS~o+}HC z*qk_N_rj6M2#NxMIxxLw-SfBVc^&b?BsR*M zvb)8T;a1<9A^}=}$L;5QMbSpy49vXE&iX)M#uL{;k^N0Sw)kKCofO>5rghFQOog0p z*i8Jf_^FDqi!TRI^U~DXCqT0(rD+CyC^f80@~0$zI&Vb5^}JM4yKe`mJpfT1u+?)^ zCx&|LqZJPZyN-P9k?+@9yrb|UkrAE5PvKnMkYssU2I0D2m!p;%z*D5WizYO+0h?^A z7dq<0T1V;K{pyjSLk><+0cMx&ic})4n3<%3{1n{p!H;ZL5<(5>I#8_jcXk+U-NUgY{jWmB_iyZh6uB*e4;Wsq|)RM14*Ln?64`~tHsot z0U-)3ZME;qi5A|Y{#qT9mT#e2rbY@kV6Nsm8M)+f zl1pFbB#hl>aU>(uN#TA^sJrx4!yKUJV$dNuOD)ogCASPTQLXJV6V6|7YMgI%UcAh| z*H4eEgNbh#vQVJK+Fwxm|B?xWa9dF4dX{T<{%9W}1DriQ9Oz zR(p%Q5)eCfdSkTsSD^>~e_A*a5=(~MGk zn|$gf+2Nv>042YXzb9e9TB}U69BsFje(QN?P`NAiJw<;bO#r$jrkM3qKE66W^dp6i zSty;apWSPnZTUinp~h+7?j~db9r#SZR?YUQti%xv(wTCwMdUf+#=D%mT{4qH*_~Qt zzhx~Q#h%>!V*#++<2m%%I@gfWW&Mp5f$TQUg{-HL`pitRV5wr49V0JbUfoB?8!|t= z+V&G9Og$n!bU2`fX0j8aU5>2?+B`(?#vP%M;CK;EYSAQ-d<;m{LAb43kMRO(b84$; zbJ2-bx&X)4c2@}Kdv+AwhCE3J@C+%o**z#>-_N{OGoQX7bfVh!j+5eY@3+)_(Zm|u z)2ec=?5&NKHl_3VPPbd7#S_WU4|z)jYTI*8x<=wqKgOLUnur8#5-TyrCi^IU@2)&b zc+EHKeVi?OLYfH|OCdypmsl$L{VwYQu8$IFRf}r+5~NRmFb9mw8c}mkjJN%Gb>ivc=_o)x7 ziI?Cd1P?9quP&GQAI-WgCBG6>@O>T@eKPmrF&B43BME)qB`ZSme!*!hbI?s6&8mdJ zl!DeQ-F}O4$Q03IePT6$6N^MBorOG zK4edBIKp)lmbvM3&oR))O>qoaq8Y5g`l_d~#Xf*t!C7p7ra8-vJQ`$NuB$k| zA7B1{C2hDQ`d0L*44=s+x5W?Rf`e%E`^5m&UWVJ()b}4wyy^LsBd$G$)}KT*=`GHVoG~Ev=&M(J4YJq9PoSQs8W8I-SvXG3hTGo>G=vuo-_p=t+oou zquA73tJ|PuSd14b3_QR>LHF{TD%;riQtnnUSBng2X$oANmr1DCADx-%VcONPjYK+;!ctrI%Ax^rd z*qnMsZNeJoI~(+VesxSKbmK79gPIyj`=SJ%gPSd2mz6pe6OXMK2K-2l$Qn={083~* zs`DT+dZ$V{hfSFSB0%cr4wTE+=sC&OXKcG)*XleGy(ur0UViT_rfJ+GcVkmS>EXn6 zPqmZmeett&!AK90m!0pLFj9}P@AMMk;m-b^^CVbZe|Makun%=(Zl&~^9waM=Qhyu1 z`}bMZR8&HUO^bCfL$2S4esrmI>pBZ0nY%u#zDvM=srTv5Q+kK0(gNV`hYx+oSbXam z3TwEF_LaB^{WWMR9^-m4iA(|ZwLPdq$imPk`w#LQj& z?st+sdTXKv`-C{6KaaV+PX@k5zF3cY^7J%}{q$;0e(PXojjnNar&Xp{$O$Le$?VxZ zn{NcwUP0$4`KOVbr>DEfPv5Q}ip2Bj`kFJNHY^;)LYz6`|J6GTR+N@~txL%y8B#6B z#rT<2rJdB#sOMNi4g5%0@x%Or-^klmO|jM`!Jj5aEaxh>`(N=RT*>x@93nqG>-EwE z$wta@V$c=~*J=3jS`dM?KV`PX0|$Ux>DOb6T;d)5Nc}+hLjv3O636C`Jbm>jn_R7j zShlla5;41KevhuPDKaXc^VYz(5D=u+gJJ(w?{HAIW&1;{s!~-|;i~tLm}bSgcXtNo 
zx$S40H2)Q%!GFqr*O0-T*BBF0b13s1L53r#19WO0D-!y=2s2m+u`lys(y$vRn95|B-l3J^9HzR;i_GR`UKbdNe-Nca=Qk6CNl9jCAlH}5rz+Sqa z#kak{5Sxx`jc2)RT+_=lH3PM7;b>HXnCO58=`8=ssv(8K`qX((8ldGJq|U(*9vS9 z9RME7irL#foPL)*h+IKf2F0pS4aa62bUpMcSB(YHMT`2DbKw<8j+?Cn$X%{a2fDX{ zP-?fPPX2ZP*jK3?BL%2a$NVSLZb{eModsw)5c`7#F7~Olxv=$$tF|Q)qd8iU!_7jr zV)ckIi_A1e_X?tltynmrvRLROHgCj%f%_zILT8oKXdM&%d<)dN1pd0MJ)Kc=FwgbHQ=eyxqSoyTKP@x6o|+;IxK&{Yz>oV`y|@tq z6f7?NHG-=;^QYks<;OjG25Dlw{HqekGNoE=iriWGXEkU-P|;CTW$Gic8Ej$mz1@t| z=vu`La=paJFr0HcIQ&@U*(v2Z(wQ=6?~J)h--?HB7M2yh8FS6iD~J&wa^O@q4jYnH*bh?fiXxRanVE&FMR?^D&KM#; zx#kW)^jW9`VkKLl{&nQQS)!O#=Kz$W0m_PaY5km|mY1hR^Fke5UI2R3g{s$1I7(N| zaP%>EkE{zJ5Bds-G{)dLr|esq_fuk}_=@%e{YuNO&C#5-fF~y9 z=t$8ugzLyvUQSWcrqzb_BXDV=!?9VMM?F-t*yznS#xsp^-m#v`D^EP@(Or47O<>`M zrz_EF=OhUVU7Ci0lZTWxo7;BVF+7YdTr48FWB+|>w(d4) zv0mI-`QIz^Ut`Y8u}=*N*=U!LK%(t7WT{%NDW~eui`rx3q_b`6`!RD4>pauj6IukK zWXQ3itd8s(qn@E-2RP?D5Dbun!(CVU%VB}$j_&mdYKICex6RwjG-F~q?2VlLK<$Ow zH3j)6I=7R&$~Cn%L!L~uB6SuX{U|tE_yR^FdCTe5}vFiVDlV0T5j#!T7V?|QK z&Y6%{{uVF1>1!~}!gc0F+&fWnykm-uo+nD>CJ4VRMJQ?ajHCuezs7yufe)L5d-}~s zaCRS>y&r+QI}l1fG3Kj3RJE12j)KUV7$$BT)mUK07-6?&OXGL$=#=|ER1tvY-+Qng zcLk2mwa)nBqEPn=XX4!Rq*_FkJjb=Bmz7F#rxpEEIH+Eju>=^NASJaW!dl4Mn<&;{pNnqOUXMKDwlf0!e_JeclmA8rz56Y}T z1Qeg;Y;{*`(B2oPnHW;My$USM>a&uBGDn+uf`QfD)wTBwt=>gFu^SpU=tX9E>wXk_ zw!@gy(ractpiRG(Z~w`ey|lLxjvV`ATp3$)q4z`+x{7!lthesI3ujg9Elkn3$zCJM z?n0lDaTqzEL=#ilM*#KdGM_BEgBiv^om2*hW&k~ft{b1$l;!)9=VO$DwuR+JY@CIz zef#6*V0wS~lL>ct=3MI&(lV=3(X`$F*pyoLsF1ydx`&S%FhnF}e!*S+H1p!tMx`vZ z_j_s4{hm{7)GPONF-6L!t&u|uKW3!2N#aOQ+7X82%H7{afwtpg1fnO_(LLQ|-&9y| zzzkuHL$nC4lUub{Ed_JRtx9=i>F~&lZK9Txj;7JAQ&Nh{arp&&u%cdy zP8_YN=nfCDN@-=+DQKj9%AJ64!E8c6zrujz?f^dcIBlb=MYTVgt@#{*R{}~&DZRsm zU30_Fzr-U2LMds9w&;B(8~{T02Fw{vJDF`&SbBZJ0tXB#@#%!OL6hBjUTYx?jU@`3 zlU&XhSLkUP0`0wv znIK547Pt#2g#sKB?=NSWHRV&R5$y+1Z4JwXw*`9?h{eh$SB;pwFvnXh>kgM0lxpdR z0yHGsxmZt3`VVjeVzjZEgIo7MJJs^2hON*(^zzFPI-%9EuaO#aqfic1~(4B!yx z8WgyAIq|`CRTxp>i&(MQF`{jTq3DeeZFFhD4Iik+03$)n{yeNnKYI9hKIL?T&l><| zH3oQM?a3)C4xf`%0hsoPQ;|n<3U#aSEPbQ{4Iu3SHUh^$f87W!_Tb4O(aYcz>$1@r zmz;<`#^|5Cy{}72v_@b6xc0z%(rZ;q$|E{5a>G1Ln-HWarC1fzH?clh1UEF}DJTmz z>gY~V`6fV$UwZG5OJc9kc4m!p=oLu>sznayqwI#xvg2=h^$nkP zp$rA;F0`vP;Q4ygO_@>HB68hj0%3vu`&7B zC`QU?VvAi)5+tqWsYMCqmpHa1`6QM;@TT)k<;ri2+Yw4$OrGzh*)t5t+MDG*9;`c9 zi1*5eBIJUAU0DO--hmV3Nx0y?LT~p1b^E0plOx_Ei9TlK54`6s>NA+=GR08a@6)$x zv&F1Q_PVtUC924=E8fYHp(SI({res%e`|tS7SkQa`UYP|L{hr*MRaZgFr~??f;$No zXicE;I9zGP53vxz2SHM8jzW8A8^bPS?I=*F0Xe)b6aK9EIBr8=AyI@KKW8wI#I!fg zc}MFYC}{=XY|%!#8qY-_XDcfV4Ey~^YVs>5Lr4?BULQB{1ed#4Kim!Cv;M!BypwQ( z^MX-Y?)vpNxngJb?6dw(H)O`rABJfTzS~a$9D&H5Bqn_AL=KY1mZCb?FhJu1%Y@gt zlS=nDVbkl-B~<`E^K))raDu5JS$|qG0tPs(^tZFL!OPgS$S*Esb&;is%4xRXjDp_` z;Y-zTwfES5mVzzwk{loF(&dNql=-aFoZ>`5;;e#oQS#IAP8+(Gk_n9@;Y$$EQIUC&OVT?Z95~fh zeQBz!ugS``D9Ni@+QX}|e%SR}b^DeUMYtmTQN{|zUUn8c9U>Z^BZg_MIDfLx>p7z6 z*)wWVn_j2)#vLI~A5Y8e=w}YjE2*0?mczmE#4_M`e&!Mm(+sipsmuxX00#~r(B1y@7G32Z3Pg^CzSmHPEzGw~h$FNmdj|y<3OVO_5oPm3xy#WG zwvA4MvGfjvt8&`dU&EXsqLD^$`ue-sixQ<;eU2Q!t>y!A_EzFW@|vl(9p0>`%dSW7 zTUT7R^Cv5SF9zv6S8-P=269s9T1pLUyR$6<2RWrlgm9iF;TJnZZD4QbqRY-&616SQ zP|8RPdEYKKI?y#jYAZ!?Svz+u2m=R0R%aL|T^N;o#qt=E@{ZgMGo+AA=5yy4E_l57 z4KYy#dB`}>aZ=-shFXP*jo$0tVSn?Y`+emwhrqist=73Mj#A()QOp6qOG+V+RGdC% z%3wXAya{!;sxpb4Fr(!jq}kqlc#Wl=p{`ky5ms?=9j*2QPFYg@sMM$9KILM^YNkQU zmonY~9ONCH3b}uR_(nzn=ZixR*oysgeY@m0dyChwE;d1 z%AdirA#&2}o2f;oaaHGvvu{5uNud=Fd&yN@E3;$OpxrzYHQ968A@uw7;0+&H!?S3W z!H;Uk*pM5O_ElMQ)iNdKr}x=tLPO+JAyRU7nse;%q_Z55LEhDM-!nl=4?8TG-zb%q za}(gyy!nuC7~V;U-hYR$IQ8bfdPMuGFfn~A1D5OQ%M-Gu%Ol~k(fzV2$v?nS19`|R zxu}GtCeyK+tI4LDsrIS6f1x)H!=6LLEN+U*=^(t{tVu%5C 
zHs%%WbuiyOEvEArCtW>4~1ow<*xtd{=!u_0|gJ*{FO$0Rv+T8Gg> z*k6|f%DPt9XG87&&ZRnSnnx~^uFbPfv_bts34Y^?8&v^e_`_X2yu{h8OwPs6kj0Fz z4PkDN(xUXJnneLFAYR7W zN?f4J(vgWt8Fewx{Mm1If@*$6mR@Sf{*FUdcJB@rKZ#lgL$x?$V9 zs{=NnC40tbedAoDL#2b1(ar5^=eYa%X{OLw5jKv&7in93hezOicWuHkrE+*F5jJCF zCs-k1k#rPCt1@;6i@$d{L+*5JLTRL z)j<@ig^$x}reFI^6vlD48P3p{zSO*HZPBqpPG^8tSFbDt6T!#~sw+zU)MeC-CxnB> z$OfY6Gu*+Zld?>88~4DOo|O@qyP2ouux*qJ{c5L`kMoD1;&+vaH?eQ8gBJv+Me~>^ zEB`vxuI8>dYz-cVZi$1draqu|$akHGuy8!DF%uZ2E%e?Qi|Yw0~{jAerzJOG&~NSQj`W6Her+f<+ucb{{*%|0}@3m;EYRG zq~dU7SyY>CW}IxiI{*;`yFv5%Hc|+Cf+G$DpZeH0tFGU;8&j9~_H=j#y2-Qjw?a6^ z83LGaYPwN_vwO#~>)*k92B!)y5E;uDY68YIFDH}oaQg~LJoutm&)Mx7-%LXa3!4@}<8SB3Q6S0eSUn(9rUB&al;m z4Hb*uIlQqeDSW?0SeX0A)tHkj+c@E#R7Pte*$|L z<66whO$pmpI}P2COYd1VMSN*_;Xofd2!Iu|c=mhOn=YLr-o#lwpFLA4@JxA9Mb;Fy`4?HIMCe(z z`C)dMQZ7oK8I3l28G5P#bMqj>B7uV}&8i<`1BF-aiL|T)?pr({F|7B(1(bYLFttdM zRH+Nqq*vN+yS(1pNh(~K61#JGR%DEDnTSXULk5Gq4bE!ry;^lZAS4|w@0oL%vNSv) zxFCcRD{LV4jkyo!4Dr7rI>n75OC9?fjW-XT%Mm;LWraX1D_{GRuA0*WY= zu{cPTrr2;Jf(eJ~8FviX`7X5rdZIrI^$s8;qVSVl?^N+fT(4i$Ywz>*em=0Vvy@5y zvz{Bj1i3m3tVd%s6g%cbI*|4uCsoE2M}W zvwxYqZ$jI9tGSP|)(83r^Ey<)rDn>6H@>f3e&0T6w9 zuW>1hT%wO9q8`{kY5*y)m@R!+d>wn+-Tl=#uu+`?Y7lxouuNw0Cy%;0ipa=TbZ(n9 zu>;5VpBUm}sBr{q&R7_1JsQ9?z_ z(sZ#@F0H=yz1;4xtybY7dn0%t9m-#==XrCEVX7TQ(?s`8p{BzTDa5>%B@y;pEmg*6 z$b-kTx3j`9tEtFKSM~Y!Anmp45#w&r_JDZs%Zns`;P8APr}P*0Uq{ggNE?t04~l}n zW;J!Fxit^L;pPCge#cAEf3ve!ohy`{@#$x=3nTp@lzD*9QhaFEvkw^q&$j^QnF{yw zazsUJMjbq>qw$MzN#%}RUQ2`J2FGBZVe&Dbe1!CWgKv$K;P9asJn=qK92MS#D6te8 zkn7vn=@~bgmtXDld=bzty&b{8n9$ypp?&)fEe&C{lj05UM4MBbE;Z%SXU=E}h>F}* zzCB+zqD%QyvTo(}Q@J!aD=YvkU88YR!YrT5xiZR)z7`&^ASSKn*wjHTHI9w>JlzW~wmd2UDQ zDSfuLtAkKdJWDNCe&Z?OV#?Pc&3ax9b%jY~Cojod-hD0h`!MX>gKtHh8_C6OHejxt zW|n3c=S4dUJDUmTU zvv(8h4w*mEk>c|h2vbUE-3F)0*gR+4NUiX0?Oo`1FAsG$dV!4m=6A_KU8Tru225Em zcFR7L`lbTWoLCmhSbcc}r`G4P=H;HtHMxZC6YOTW!6Ge5ms?Ei(PS4*+uV)Qpg2S< z=``fEOWF(#oNLTsl(3O_qg}I0cRoP~LtH4w-+UTwr==TE`o8+5P7VHxr4}x?I)_~T zvJ11cdIUwc<_ud?+M7KLmRl7&@?R_EIpW)Ut_v#GduESv-%uR9_t&7-JxBD-i3v|= zodF<-|2)Ni=e$&!6j+WvIRI!A*e;Ag(tnHV@Ucm<6=0P!c&tT8wOqb&j+ULuEvDUrOVdkEyQzRg!aqq1K>P7?K%=Xx z--<2cbVVy0Yt9FI%H^H|CQXviz}ZZWIf6ibFd8UP2ZZ*xKdi{TYc6^5r+*_>0$zAu z`_c!MzIv0Bp1zjB-O(-_Bah<-zAcE1;5M$%T9Dh@fINBUW*INK<3Js$|?Hb8IMd zeO`u|T&xf0GuOCv6d1M=Q8s5_HvbY1l&20TzY$DLfJQF=Vaz}#SfI|%mC~w_3mJ)W zGu$-cVm!{TmVO{6qb_GRox7a>W}0$m=C1+x`LG=P*b(u%d1ghU zOw?-hga0u>KP!>n06dO}%oK2lH3!Gcs|R7wH@NDZ|8QBjxYS>xpy>ij=5WbktndQ> zIQ4&w{?Bj4hp%v^XT-U{#_Z`Y9`3qaa_oOjGWVqXYcajyDh%R_Jc#qP*QAX2t@U^Q z_Me9(a9sV=EXtJ!L`f9c$5lx;`j@bXB%gq|5;F!YQVfv6FZ^?>9n4FXU&(A6`ceyIVf8o^rR!kG{W_6t^)7aJ?UQ#>Xzg1BPBYq-_1z<^F{Gjdzs( z*HtFRK&&#f7WaLKRHH% zs(SEc%M@=OtkF|0BKzV$m01DwRqnDS$76|0?>@Pfvod&1q`l3|-l@#-xxJepC4@Y7 zm3P9MKyuO9-cfNi=@uKr=MdxSIqhv`YiqlD0_e{3N5H?C>dza~=pXvm1KLdW5H-7U zbZ!5bQ2~meN|`D&+Ty@x45dfH=ZVJe01>748G@X7jZ+PJMdTRL_dVeGg(>)Zb{nYLADbfr+!Mzou(Lf<O}Sj5H+i?9Lz!=0tY~V?+qSTZDyh${zk+JtRj$M@KJE|rg>z-?Kg&CqT^5AJ{gjSLSb5{ws z`{1uB1S&+pVC60RYq0R5T}BWDbK(n->$nkuYm|v2wjs8FPJso#*rwb$=WeSxHF|^iunQz*aI$F^dAe-a_H}0N&lA?CC#h*4|_w-m1{Z$etCf0 z**BSATL^G}00{wr`l8ZN?4eI7#kPw(Hq?7mFGSPfKQUqWqd z?MvkSlUWagk4RQ_PR-2Rs{N_@=S`}cfJjP7NliGN4BMXuNd0isfQJHbI`97em4NAJ ze4ABs5GhpU&a+DQjpmEt1quc?9Ow>p!9lCx?Z2ee8ZPuG|1?ZXSU#d+lgs!8o4l(i z54kZ50-h*zc9)!VdFQ30fNVqKYyROo+@<^C~# z1gJLxW>?Yu_wd+dpO_mPIa=$D6UuqA^qZ1&4z8q`@wueW+Kf=I2 zJ8ne(_Il_K&_peCl=aI!-Ylk(0K`gD+-G7-U zI@k4CFJknrvMhlFNaI% z&n&?H)3LH{d()RmV;gbKX;MV6>po&~>dtm7Vea|kn>ef z7*4Tkqp};TcCVh0tsoOcedInK)ShI}e{aFuPw$xPeyO!Ee9Nb_Q3-Fsr&?b5MJ*%m 
ziaMV4N+0019j3RP8rkBLk!bUj`-iqQS09kO=DDwr#9z0T0BCCCfC!R|`w)bxwk4G`VBPppOC@qrGN(o4bfV6_NG(*E!G%`vmDcvAB?=B(oFyHw)^%$VACC->Dj$hc-^w71r*?-8eBXi7$;f0U~d-Y5ivvaK*~OV zg*D7-_|r7QZOfNgorbBrUIBv^Y}-sDIV0wQc8^+j**ZT7gjMvd)hfPl=Sj*dVND-zTSgC>1aFJ2-*hN4HpDlD<7Ts%ah&86YJ`FLTQaK#Av z>H2Oe;q9=_Hnfb!^i}C@AP>FYD!?FPQAt}oQX~gLzxV?fKTwVML*d$T;~a)R{aGLS7TfM(=9fOdyCV-sf{|-XKb7;*)azED?gxm$PTH;MgRVE*l zs}zXws7Q6S{*>YfZs`g4o~V_I;u)DODGBjBee(oRl9+Lyugg?8*EsQHrKj=t9Tuqr z@0w)!MFai{Xywx4O^ zPN;#pql&rx#!n9PvYi0tZ~-7K|KT<2?rX4?3IlUsMojWEAJ0L$pDyUaR^K-whkyIW&u)Bc-CvI4y(L zeDW1=`fNna5-J{kbO0hJ(muPerV_BN2tjKOt1p}#;>2gu#s0ZiW!LZPvc#DA;EwpF zk_zI%({9u-2`cGOxl!Rc%KfKzHp|%WQA~@aKqTb0roho z8Ag0t6wF_pSUAn>?f7Ds_q9tE1*#{uBEQ_WlVeTl;`B%CmGDO5=523m<=4;Kv;Zs& zi`T{|Ztmt^Ah1bypBTNh=O^W+;QMX<*?CZl&)J8Yhoa_`^-Cc%5R1m6{$le7aqN~o zv&)s8AkpCfU-t5-4lQ9`H+I&9{iK|W5mmip4FGkGuiIOMviAc0`dPV4v=YxW^b@m2~iQ%`&w+-f+LVjStSGLRyZmf~NW%MieW4IN|cZKRYb2x2|>Ko}No$MOWa*i>x$ z3-6guW}Wqq&ZD7{gNtcSY@lYf2;p$#SO_~>s_0vBfAQ*Z(q7U8>fjGK!Lme1OdUms z2?*zY_kT{1fdH|YNlD7vok#V9X_LR_ev&$wsJxHfTGdXm z;yXRELcEIST0PWkIW~XG9>RF8XFH{c#B^^pm*@wOO4N|L;2!FRe4sdm(#p;~2(etL zICxMgKe6Ao^uX|&o(v9o2%3nTz_%Fhr3IdVOt89Hzc;Dl@%-Q^jBn4_m>6Vy4K^J{ z^%rP;^LsBw^H~HJP)00D zklQ<_X$fEyraLb+nNq*Cf-BZB;jSvwIpEsmiE+#glr&C(KmxH-Dk(l-^~Gl&2>{wx zeQ5D)e+FPAa;Rky2dl4d9dE&LPMH*}#nsq9`~N&Ljy**W#h@cIG_0)jhOM!ym5M{T zBk&+Q^AG5InJ>#bv&4{?)F`KENOVo*j`J48t|5Qu_3AqDOughS-0;4VVtoPkO95$E zZoCF3sN6ehqxf#8=PYKXo=)hK@L=)DM`JT7@3V#+gQ2nJeJ9Z1G=(dscg0GV*e1rr zJS9%o>`nwU)#A|9)ga#ouEvZm;lyko*68;6$aZ6D+6_o2A={zMy^+;pz?I<>x06F> zAQ-h}S(l0oi?h<6H9dc73`x?K&*Oi3dKEMTkHw@=`fatW-S33}KT8cMwECgoX}a2( z4eR)-rmx^D#vaTE;iXKt?!}32s_kNSO2-mY-j7RO^K4nplLPvXrNm#nxISCd%c>XzK6WRW2}D$wooV^7-}o?qW%IJ+xM;5+ zUd0%Z?G(d=)^Go0T%a`O@X+ZmVP=A{>g3Sc=`ZubnvE?rHZm#cqzP1zTp?0S1DIHB z`4U#=>qpZVuN6GaXkC7PptvmaFU1jaX3<#<02Hx$%2IkR!T{2BMt*yGmfhL-rPOE| zu4Po_OE&=gwuya#4CRSb?8UPW>PVWR4$Z%J_M;#$A}~ z?|6$6c*sHsi=LRLvzrg)!S-KSN)i&3gTHV?SYMaP^eq_wWWV3?uF8-x^0<)XzQZ*Ur;l zKJ*gklsx*?Zj|+-)pNEPM>GEw(_N1ttp6xBvP6{Gx;A{y0Q@tacn30Yr7*o_6Ghk z2RjkQT+4L*K>anenxcR?I&?qX0SLT{%`!RSw1kL%8AWXYZ*R9G4%yFB57Pd&P9Q=4 z`Y5`45RMedu^$Zy!lv(PzypnNL&h=TzNRMU`WF{r+pyRw-$fc*hoviI7r_Pn6+t-G zyYlgDN@wa+&jPFEVO9BEN_W2)15~t#T0TAQ~re}gloC%y8DcU*#eJX2ta69$kB8-&8=<;;n z6#*QxLCfp-+V7z^Sk zhzNX;BHyDNfOFq^vLl3HR7&4b$~Bws4dt745q5thNbUAh-p2O&EvgCQItBkkKCHrZ z`{1gu7gw1lD;2OR^?Gidl=Z~TnmvelG)kt0H?AFP{gZ+Ej&P$Xzr?c@k02RiuS1F3Z^4u}veapK7=uGD%DF=jB#w6|zs|S4 z4U*#|YN+NwF{;ftW%_i&E9Tqe<{q*^6y(q`=3<*|WBYwP~!tq7NVxiY`&zN#4 zlk3#tUNbXXep(%(<~GJp$p6+9B!0u0D?cF_YdO2I_$4-Hj98X~lcIfOq`Kxk)f*U! 
zo)30m$XeNTnrqQ(PfFwTc|1{FbmHJ63wSw2<&v1`7$+$;_*ws_fj`2`GxsMp~AV*iu*1y7CqX8_+COkF`TKXf(T9JA}3 z-2oyzt`KB$x`D{e>QxXkz{~IV-m*yIV=qx9dE9GdnFn+*gRB_%$~z5R1`00q@uShz z-4W$1@uk!EPMF?&PCCgVEidVktp|P`9-)-b5;WG{7n;}rlwO7XJ`HAth!6R4LEAB9 zQmSXK@8~E#h&;(BvknbaAZFtc4q~z|@5*fS#JkiNeGBBVqKmF-^u0RE zuqxEE{2n_+w$6eUH5Mj)}ZyR`_*e!Tu2+L}VS>-gG!X!?tla{4*WH!a8KX>X1AYP&0>v z{FVN?&l5nTvJ1pQ$35IrjJa%;bb+?PN3>-Vxm|+p3gL^%a;m~Nn^L1+s8%?Re004) zsJGIu-SK#O|GVB5kJ1$Y{^er@is?(vfQ9)5ah~$k>)^7-ARRlGwEP%K0=4?qL_);`(}2*oJ7A_P1OC zeiwq=dC#~pQ0!h{qR#FH_<<>fa_mHpdK5IaK4bB_+3v1i$;XMO3Sx}3MK@F51n$hXErF`4h0WuP{aRI$p#f)JK z$J<7BZ7kQ1{p)CIq1I-GN|)aUIZ&9s*Z|zU%E}za->8Usnbkol^%@E=q90IDUY~iZ zW=9jkBGq57>XUhW#6rV19o8qhq?v2s?Xs93hxS!^5c;6+LGLoM-p77kz2fIBj!t3@WuxEcr$UpMCUUE_erf>c z8qdXLyY2H&+C49;UHCJse(@tM+KyAHonSBn|9&YD-0i&f=(dLILdWS3UIz7JXIigJ z!_~4KRt7WEM>F>C!nD_diu(z{hBH2M?zO6W52pAuP4;fRK_+trO{5O84KBS$by0*F@(bQYY*N!>jD%jR-#01Ee(RwZ8<1{8yFutGxEo{hQ(qO{gdJ%eAVYS8v+o^eEZj{$3w%$W*O`V!dhhfDQ$CieYl zBG>o4E4=u>Na7q>h~U-~!TD%NR6cm+w^`e1T`1$F!q{>UfkmiOLH`u3kl8lI`>4~c z^nZ&Izm%vQAkJLZIL|bTaOKf%^sAlLST8)`rGuMTAR&(YeC~YOdpvc!X#RK<&NPA^ zUcDN}CK&|RgkMA9*TuDcu1ek9KXSbP;|n6vMG^~YpnyvLN~?>x-wUf};eB@~osT;I-3F>SR>Pqd)WpBjt+Xq0#CcV$4eV8=gW zxQvQ>mSM(d^iU)ljC^TukCPeaL$P$5AKDSere4IH>lP%poIV&Lg^wd;$p3SlhEEeC z7r%{}R(f!>OQ zaxeUuPKI8sZ|*_87;vglAJ(Y*zi&*SQn)2O-H^`qA=>Ts8jPPTI$W@@EU@EIzNDS1 zNB6V^L-fkSl_?r=hCO5hiHIO(<^F?~Z-#BFd$7x{?|TzDR{$P@%dof^!*yAdi#CS0 z_gJ5)+~1VP)YFd$*G0p%Yp(%hmBrCS>?yzl5Uma}z+MFp&w{uL+nqYwr41I^PbW7r z3KeP~A9QP|RF+g>AKmd|tPY z@14Bjvvtz~gAQ9AjIc=pnwkt)G_Rj=_gn3a4Y8m91nvJg(l;=1$-7`~t2wtT1+`@| zbl6s*y5oR?9lBohL~v-C3}0yQ&w%IeNu{P4bt0$F66-5A9(zrL^Et2$;9ku_zH;?Z z0ktHcosfjuP}f!)09{tIM%J6OSs8dehLqCset&slu!CsyYRt=Vs7LozRyY4`zb{uU z)}@#0OO+17Fs(KoH_WGmI?<+95z;9)<&FF5hsq=d+;_jD3fYIk{tOap0ka%{Hgu7!kLMh&Yq-#pkp;d&cmfg}FZ|FJIsS9GDdmAJfe% zCwVo?m*Xx^Z@cxLs|bi-<3cxjSV7koxr;_rZJgU%Gr_GIV(8llwl{Y`vPPT zEO?3tYozMfW={V6F7>o&4EWvMSF*%BB`M6?;l=+z1*yofdZ;H z3S7XbSmhh02>19$mU?8dTtVPk14}n8lEqK=p{vl)008D0B7WI+jaTx(pnsK2D{2CJ zcMlCiv>AZGr?)?qtXFl%94OUXy{RzPCh;d+dEc>5d7=iS)gr>_;z@*-;hhS{O}psM zXmb7?1OCJ6TD1z{;?~!aXAOjfbld@oQv;7)@Pp|<0oyev5-^^t7^`QeM@;@>%|P(F$>MkI*q&LNezJVs z%S@38>F8DN<}75LFrcu~>m1kqQna7%6Atbcu%K{c50cWar01MwVCH(wo(;8u25vU^iW!T}rw|BQ5aYLwz)&4~u zi}lxMt4Co1svq)6gz*V35!D5*7r6grd4K7e|9YVRTOR0_TKxa;jyOlu3+;HIadu$$ zWOk|`z@|B4WIv|&I4KPCKY&7qZwHJUxC#KM8nlbD^bj$=nb#4fsn-eG?u0&2I_=Yi z{G25_q(P3xzf3hwY;CV~5}hAhBGX&f=k-cGk*l=m4F-Vb{SHa`^`_@Gq~@M*!oMgb z*6%Kl-M@Y^joEA>UZt$X7xVwiYgJ9xr2i6FO6W){kQ?m1fc&>Vvk!6%1I|7c8rKsF zGOr8*F6Pga)!%kd579_0`2*^I(9=eOy}Pf<*znSnka**N%Od@lK|O|r03Y|gM#4wm zH;lLX-~MjVYT=NgT+7w9AoFVMzrMObIOwV(Zl&YB*J~o?x2}$)+|}Q26@)Uqq2J6G z6pUs46dU^JzZwV=KT2Xiwv;zuRe`S9QC=&rXvCeqTKsQ=^z&95ry=p4wtakNXA}JM z==pzY_)I_Yv%7QHcO=eCJEXw>@|VXkY9q`7OL`Ut=SOULPwf7L8T=0~ka)5F`Ulh& zf({=hX=ql~HkMlkwNRSIy0id}4V`e%ZhKb|XP4ke$NuvN>yG970DE&_jwEJ*6T-43a_qee2 z;~%IT*HH5SIO*n3L6$2aE1CNucY}6;Cie!l zyYJ5cz}v+%*#7|Ik!G){anv7O0kg%StJ{L7+)cB=dAH1E92(a9PV{*Be3VT zaXorH0W(X57|>~gT4>Ll2lofp1HH6K=;!~oPQ3ro2>pc<>tPpk-0|VG&b5J6Gw#(E z<6O&cXQSx`|Bz}kO-xHMy=2uy^qjyoyD^t_Ne%tB-TtAJUn}`wCDS$P()5qeL*!S?BdZUaSM>ja{JQo!Kt#fyJi zf6{XP63+SU)raU7ehL8lrrM7N(9WJiXluvCl-$%#x~*BaWz}?`;luS?kV>fEqx1M_ z;0Of1R8;c`3e-gWG-j5-P=D?6wt0ii5LklswqvJb?fz*HLSKBNS_rk~$oYkJi)tqc z9p|Z$JDe*3AIcnbCQzXW^4#3xa-TLtK-ryR!p3-Bdu& zxor6tfyBX4HrZG5_$r;s&;r-C*?4p%%mPSK6ELDe=@hF-pmOe~)+3pzUp75Bv=Y?Y z%~xKzr%`9f$`2{$CEnTft$Rnjb)G?}zq@u-+v4>1`d%pA3)y{0Ee^t9NG{zDU{gs` z{Gayj^2@t^n_rk!c35i>Sz#>5w6%94^XOHbY|Hb78;E-vJL1+!TGAe|ax$M9;Dz5* z8T20~Ia8cyB$isVm)rDlkWiil1w2H|Oajmw`Uu$R4wS=zEP>vPKUzH(7($CYll0;K 
zeqY;fo}-fH)+^e9EBy-Q<$v_IzP!|8oGue_`)QIEPzfSV`Po|czEA`z>=sN)a6kb7tC6nfWkvT4N2SxY3k&g7 z@a-3|(LPKB-(COGr+P$4`Iwf?z6Y~Vk^t_xD$VUcv0BMnym^^~^s@hkjGp;fZ)A+;;)Z(5;Up*fna!~?Y^Y$Ld?$j7e+ z9UaQdh)d)v-{bhcR8rKI`;6}JAEFo0n2EOG@A@4qw+gR;c?$ZF)%bFO> zfZ=!4PEP?1CZCam8FEX*R6_>oPB>t&i7If?@g;D;;P7i8Igj@cyS&JKY~^9I}y* zCiH1_d8>iM)*JP{3W+i}(csz#{BLf^EYy}ox|zxuPKBL_!#(grm!%wjC6b9de9ZH* zuk|AOXwftB2R|UD8v2V57v6Ias&he!0M*k3K6!z;r0dN5Xbzu-(G$Ot%O*p3i?pl8 zhr_u56pS4{hRmwTSeDTgfwAAL=X4wBD&<8$9vCd%e~6e795EWLkj1!&{`rq)m77*jEzn zuDu`Au#ZeqS-xrTL6M(Tsoeir{G{i?u?g>i0F|A3+L4Q(X&vYiA=^~DZ3#4~N>-BX z)MB)27sX?xLjIHOL&B6fLye|L5`_G1*`w`@uj6Oh#E`G} z5-t{3sX~a;YW3L3TJLR$#!{`GFc0BzN37>7f|wbIxEt#pq8z-hZ+ z-0FHFs)H}18Zg>yoRMDh3BJC| z1k3s_VSlCRz7TnXgR;Ols!o)3)^>f>C)zU*0C$Doyj--wPIHq%dGkB;UP<=+-6XWCJZ#cD-FKQ$gaIGG2Qcy<=#jG%;{FlyftWd>3l+ zYNxoC>Ui-b00wkIa2J{|dpr)}m=~usC-47ebG7IB;>$Z6>T1br#rQh)(EvM7SNfBW$|4$V9i)6b% z8PX|`Z9{$ySJ8`S&T&g!abvcV(kSi_pBQPp!$2eW%iKcEAglQ^mAdQz-8iDz9HpqJ z{&e%;oY(nm-7^>Ep%^N=LiK0LA#I%=$hbD|lN*C`jYI7C%sP zqFI#5yR(5#(Fr>}!rnqD3b0l3`l5B&_m8Wltz%GIXUxKfLYy&C{PCEfFA2Mp!1uN{ zQXQC?Mj=ONsiKWbdx3%yK`k*u$BCzFx0|sB4>(qbdf+}k?QhOs$@NLQWq~hv&1d-h zl+)E8Sw%ON)fcktfj^JSRw}|ZJ@$&jy@3t`58D97!o9rhZibT))l43_CV9A~d-kjj z^p7eF-}(3W!9gc4beLLvk?CNy_xkEvBLS41E``Xn!Y=<7YTy|V25pxZ6h!vC=mZfn7+?en1W1|_ep+J>pHMDzN) zt_GfURw{$_a5IduRG%MVTVP`*u5b3O#Y^zMWS$A8dnrYF$c<}~t+~qtdV*%f6*5KA z2G2!GR$k1e`}E!2CWB0WLT=sJXR_CJ3 zhc&u`-_3F~&zZ_Fb;HZdl0H~|Pc7%zH{F$GLP#ZLen9a1Pr6u9m$j_Fb2zU_EdfdV zz!JoRmHYzc^IrAy4QIG5sm|=~k|?>F?cChzEa1FVJUhLJ2%!;YtMtyF@DGvHO8N-4*|Z=ci#J=PZUwPQDq>h9DJjJ*1Ye|o0lA0?z>KpU+Ov=6`#LHYt`nfgD6G;>kX*chRN|6 z?s*HXNV{>Js93{Xt~d@_5z=< zkdlBDPZzU87io=FSglqW!uOj$e%UPi;?wlJI9f?xnJ-cpZF@sm3$?#a!~c3RxKMqK zd~-^9UMqU<^Z{XZ_Ct4X4Mza>x8XHM41s<+g_7 zs$-o~iqD@!l7cA@fjG+eu0_U?Ph1EyU?9tnr=r4$u7mVh*pffB$UHN$=SdI{hv`Ok z5j1L=ru!x}kROj*cRnDpB3Ob%d3V8@1Sbtz3(PkGJ*G@w!Tx5xE!bT~x#F`mO(UXivU#=t_Cs-MPnb^7EFD z&R6aMo2IOXU7=d0e75vt+yrkUF(_|f#uILxS|NYer|7ItnX9kK`_3$p8BpPhy>F^7 zy@OqgLU41DcH>|Ue|Eb#UCR)EVghLwn7t%o=z3xa3$07Zgh|yl^a2gCSi@)=g7<0|5Qsx9&_I3d|C0*uXQ|TM$oy8Rh&pDT!4~L^CO=a$`(HN7KG}93)kzTd9WZ(NKTZp21 z{?(4y(-Arv+|b0qZo9cAER}yHTIORTTzH+SeKQK~jFn$%;Ic!j#r1{K`j z@~_smRy74*D6}&tW=#=4R}qZ) z9U_e@Si8bq(d)5AIniMzv2>}r9DZZW!%T8}lNQ$c;A6Mc zOE5`vM_aQuziW7lo=>*-n0b}J(x2zg%g1UI$(K9A7E$NAykPb8t`d4V%6Y+Inas79 zJrL)jWpIpHHvc8DW?}2QMw&+vjDLl?dQCl?Ty)}6lO51}G_v{TLaT&%ZdY(&1Mro= z3>wMRhJC8m#(}t7v^IiId-4vPXDJEnp}j;R^kAXNGfjtC_Q@|{5#%Pgc}I(O{lU#l zzt}lBZ16#+WMd&!E%knny4BZ|%E!uT!wiM~vmZRS1|Xs`WJYCTS(K=RE@~uo^<>Le z6#QfTlhn%WbS$wwwK<3`LWb$Fw3(3jK_SO2w-o0Qr0G=;3T zuL|$zNJ_Xv0IKTHV@eSb-#j{l55i)V=Hsa|Idu8-FO;EbIRa5oIdQRx@0R{IK~IeB zUKTzacv%IXNu-x?FbcQhG1mw|?bsMSsJ1$X*?eCCnr&(MDzT77Cz=;=LYpGo!KPN( zWvp4+N-ZoK@d(C&DZYhw=Az8= z-p|xX&=)?Ox`{UQOE~z_|0qZBs;jNp7h$Z@P!>YRq6EEZC2A+Z$Uobi8|-6Zm^8_z z^Zb|WJsfBlo5|T~zL}YNO_?t=%oWLmjDALSTuF*n^*&8{V_qm-7mBf|b4&sM+1Xio z3ki*@iKMotBRAQ4=$q5|i6%c;RN&;?5Pr-tmwL_3(iXzCR}8 zMy13?q^An7Munk1kmb@Zjs>x*&c3_ji+w%?I@Z6&Z#17k*>XURx|lS?%44D(*^0iz zl&$6`Abrd7KDi#g%UsL3L0W6lqVXfVUKQGrFK`U+#0HC_HJ=CC%WD-60tCl6!LlETOjyS?MC>|5dpjD>H9;^ zj&+iK6!`gn z>ti!)?QGLT@SldDzcTNzc) z>RtJ$aF@?}m&kcG;Um z8pYi}p!(l$_0J5uS(-AcS{hwMAg-`}Q(W(P7@0P_9t3SRG{kTmBg4sDIQq0}<^%Q4 z_lKU&9>bnw5AlVc-ZazkUSr6gnKZ9)kj`<`jl}Iax6a+(wDAVH=9|K9?I9=kFW$Y_ zB!z88Yta8?-Yq5C{Dz5EX%!u_^uckzsCunCxMj%e*fFv*VL#W2weuA*^3`mXXk1gN zMFJOV)Ajtp6ibBUK7Yt7NAI#_TxJ*|bZD_Nl-!Y?DrkMMdU^hNhUbtN9>Q8w>j+-+ zamGU0dSi~e^dNnrW=1GQ$49^ewHhsjvta616bVelkJeeA|M63BN0qjx;BMCAds^PU zVZxLIRr4hd2TkmBIKAM8>av$|^f{QMi5<&0e=VDTv_B-{X-d@!xNw$Iw(>~GpYQdk 
ze*xEbTSkN%hplwyb07H4v{_F3@0#+59y(vB-i!>53!=q}L{6)2AX9!WH=ChL;y=TZ;ZyfEB@6w}!`^COoihk8M&J%o^l_`tQ z6U;}FoaJZH_iCCviae{=yU5-e|N8RfRvW}`6V7hXow87L_$D}4g?pu+FQhhNou*nC zy-ckPlccCM)2%a28ayuB{KE|-17_jy1#Hh#i`pON8@jN%C{mgT-bIQwzqRixBwJB^ zz*LWx<}6Aic>6v`NdA-T(v_o)f4F|uEyIgBvolq*A|??sPdvx>8?2TC-c<7J7kiS`gb9qRh1^} z_8ye1uV53-670+e$iZRP!NPojt45>L3ya>&w9q=OsN}NLHYjUYXgXF2lC}%ei-b)2 zq+ArmS=c)vZ37lu-vyH9qp__1m7337P5T>&a9# zP}zQ`8h-)}V%HN9>++d-wYy#-Fi7N~ds`TXv+UyG2i zz6oEBoP)kOreu1NlGo^i2{hAoCj9{2)I^*Vt(lLXC@u$w`>pitFp=Gm{57U>n+;Q@ zT3=MQfbaeWy|kHpqSxSv^ZHlrSvt6n zU$4|U^#!~(Q?=~F4jQ`tZp;$8Pq-8{({!Xr*k(XYr9znr#@WE*PP@PmfUWDs1% z6?#EV4?Bz}oTSL8FaO#fXUv+x7Lq7ccM_mR%$xqrUZX0C#kn$&0(feH@dS;*w?f6d^2Bi^f8n uMYXd%p6hSs0TDex7NGwBf19cMm3YkFRx$3N1Yt~)8D#}^`J%gzU;ZB|cXSN^ literal 0 HcmV?d00001 diff --git a/tools/analysis_tools/vis.py b/tools/analysis_tools/vis.py index 18d7cb33..107d21c2 100644 --- a/tools/analysis_tools/vis.py +++ b/tools/analysis_tools/vis.py @@ -132,7 +132,7 @@ def main(): res = json.load(open(args.res, 'r')) # load dataset information info_path = \ - args.root_path + '/bevdetv2-nuscenes_infos_%s.pkl' % args.version + args.root_path + '/bevdetv3-nuscenes_infos_%s.pkl' % args.version dataset = pickle.load(open(info_path, 'rb')) # prepare save path and medium vis_dir = args.save_path diff --git a/tools/create_data_bevdet.py b/tools/create_data_bevdet.py index e62ec49a..16a08abe 100644 --- a/tools/create_data_bevdet.py +++ b/tools/create_data_bevdet.py @@ -7,6 +7,8 @@ from pyquaternion import Quaternion from tools.data_converter import nuscenes_converter as nuscenes_converter +from tools.data_converter.create_gt_database import create_groundtruth_database + map_name_from_general_to_detection = { 'human.pedestrian.adult': 'pedestrian', @@ -134,15 +136,20 @@ def add_ann_adj_info(extra_tag): if __name__ == '__main__': dataset = 'nuscenes' - version = 'v1.0' - train_version = f'{version}-trainval' + version = 'v1.0-trainval' + # version = 'v1.0-test' root_path = './data/nuscenes' - extra_tag = 'bevdetv2-nuscenes' + extra_tag = 'bevdetv3-nuscenes' nuscenes_data_prep( root_path=root_path, info_prefix=extra_tag, - version=train_version, - max_sweeps=0) + version=version, + max_sweeps=10) - print('add_ann_infos') + # print('add_ann_infos') add_ann_adj_info(extra_tag) + + create_groundtruth_database('NuScenesDataset', + root_path, + extra_tag, + f'{root_path}/{extra_tag}_infos_train.pkl') \ No newline at end of file diff --git a/tools/data_converter/create_gt_database.py b/tools/data_converter/create_gt_database.py index 210f0e88..bf8f9ca5 100644 --- a/tools/data_converter/create_gt_database.py +++ b/tools/data_converter/create_gt_database.py @@ -8,7 +8,7 @@ from mmcv.ops import roi_align from pycocotools import mask as maskUtils from pycocotools.coco import COCO - +from pyquaternion.quaternion import Quaternion from mmdet3d.core.bbox import box_np_ops as box_np_ops from mmdet3d.datasets import build_dataset from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps @@ -110,17 +110,7 @@ def crop_image_patch(pos_proposals, gt_masks, pos_assigned_gt_inds, org_img): def create_groundtruth_database(dataset_class_name, data_path, info_prefix, - info_path=None, - mask_anno_path=None, - used_classes=None, - database_save_path=None, - db_info_save_path=None, - relative_path=True, - add_rgb=False, - lidar_only=False, - bev_only=False, - coors_range=None, - with_mask=False): + info_path): """Given the raw data, generate the ground truth database. 
Args: @@ -142,80 +132,46 @@ def create_groundtruth_database(dataset_class_name, with_mask (bool, optional): Whether to use mask. Default: False. """ + used_classes = None + database_save_path = None + db_info_save_path = None + print(f'Create GT Database of {dataset_class_name}') + CLASSES = ('car', 'truck', 'construction_vehicle', 'bus', 'trailer', + 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone') + dataset_cfg = dict( type=dataset_class_name, data_root=data_path, ann_file=info_path) - if dataset_class_name == 'KittiDataset': - file_client_args = dict(backend='disk') - dataset_cfg.update( - test_mode=False, - split='training', - modality=dict( - use_lidar=True, - use_depth=False, - use_lidar_intensity=True, - use_camera=with_mask, - ), - pipeline=[ - dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=4, - use_dim=4, - file_client_args=file_client_args), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True, - file_client_args=file_client_args) - ]) - - elif dataset_class_name == 'NuScenesDataset': - dataset_cfg.update( - use_valid_flag=True, - pipeline=[ - dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=5, - use_dim=5), - dict( - type='LoadPointsFromMultiSweeps', - sweeps_num=10, - use_dim=[0, 1, 2, 3, 4], - pad_empty_sweeps=True, - remove_close=True), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True) - ]) - - elif dataset_class_name == 'WaymoDataset': - file_client_args = dict(backend='disk') - dataset_cfg.update( - test_mode=False, - split='training', - modality=dict( - use_lidar=True, - use_depth=False, - use_lidar_intensity=True, - use_camera=False, + dataset_cfg.update( + use_valid_flag=True, + modality=dict( + use_lidar=True, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False), + img_info_prototype='bevdet', + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True), + dict(type='ToEgo'), + dict(type='LoadAnnotations'), + dict( + type='BEVAug', + bda_aug_conf=None, + classes=[], + is_train=False ), - pipeline=[ - dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=6, - use_dim=6, - file_client_args=file_client_args), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True, - file_client_args=file_client_args) - ]) - + ]) dataset = build_dataset(dataset_cfg) if database_save_path is None: @@ -225,20 +181,17 @@ def create_groundtruth_database(dataset_class_name, f'{info_prefix}_dbinfos_train.pkl') mmcv.mkdir_or_exist(database_save_path) all_db_infos = dict() - if with_mask: - coco = COCO(osp.join(data_path, mask_anno_path)) - imgIds = coco.getImgIds() - file2id = dict() - for i in imgIds: - info = coco.loadImgs([i])[0] - file2id.update({info['file_name']: i}) group_counter = 0 for j in track_iter_progress(list(range(len(dataset)))): input_dict = dataset.get_data_info(j) dataset.pre_pipeline(input_dict) example = dataset.pipeline(input_dict) - annos = example['ann_info'] + annos = dict( + gt_bboxes_3d=example['gt_bboxes_3d'], + gt_labels_3d=example['gt_labels_3d'], + gt_names=[CLASSES[cid] for cid in example['gt_labels_3d']] + ) image_idx = example['sample_idx'] points = example['points'].tensor.numpy() gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() @@ -252,39 +205,25 @@ def create_groundtruth_database(dataset_class_name, if 'difficulty' in 
annos: difficulty = annos['difficulty'] + # enlarge the bbox acoording to the instance motion num_obj = gt_boxes_3d.shape[0] - point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) - - if with_mask: - # prepare masks - gt_boxes = annos['gt_bboxes'] - img_path = osp.split(example['img_info']['filename'])[-1] - if img_path not in file2id.keys(): - print(f'skip image {img_path} for empty mask') - continue - img_id = file2id[img_path] - kins_annIds = coco.getAnnIds(imgIds=img_id) - kins_raw_info = coco.loadAnns(kins_annIds) - kins_ann_info = _parse_coco_ann_info(kins_raw_info) - h, w = annos['img_shape'][:2] - gt_masks = [ - _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] - ] - # get mask inds based on iou mapping - bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) - mask_inds = bbox_iou.argmax(axis=0) - valid_inds = (bbox_iou.max(axis=0) > 0.5) - - # mask the image - # use more precise crop when it is ready - # object_img_patches = np.ascontiguousarray( - # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) - # crop image patches using roi_align - # object_img_patches = crop_image_patch_v2( - # torch.Tensor(gt_boxes), - # torch.Tensor(mask_inds).long(), object_img_patches) - object_img_patches, object_masks = crop_image_patch( - gt_boxes, gt_masks, mask_inds, annos['img']) + gt_boxes_3d_range = gt_boxes_3d.copy() + relative_velocity = gt_boxes_3d_range[:, 7:] + relative_offset = relative_velocity * 0.5 + yaw = gt_boxes_3d_range[:,6] + s = np.sin(yaw) + c = np.cos(yaw) + rot = np.stack([c, s, -s, c], axis=-1) + rot = rot.reshape(num_obj, 2, 2) + size_offset = rot @ relative_offset.reshape(num_obj,2,1) + size_offset = np.abs(size_offset.reshape(num_obj, 2)) + + gt_boxes_3d_range[:, 3:5] = gt_boxes_3d_range[:, 3:5] + size_offset + gt_boxes_3d_range[:, :2] = gt_boxes_3d_range[:, :2] - relative_offset * 0.5 + + point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d_range) + + # vis_points_all(lidar_points=points.copy(), boxes=annos['gt_bboxes_3d'], input_img=input_img) for i in range(num_obj): filename = f'{image_idx}_{names[i]}_{i}.bin' @@ -295,15 +234,6 @@ def create_groundtruth_database(dataset_class_name, gt_points = points[point_indices[:, i]] gt_points[:, :3] -= gt_boxes_3d[i, :3] - if with_mask: - if object_masks[i].sum() == 0 or not valid_inds[i]: - # Skip object for empty or invalid mask - continue - img_patch_path = abs_filepath + '.png' - mask_patch_path = abs_filepath + '.mask.png' - mmcv.imwrite(object_img_patches[i], img_patch_path) - mmcv.imwrite(object_masks[i], mask_patch_path) - with open(abs_filepath, 'w') as f: gt_points.tofile(f) @@ -325,8 +255,6 @@ def create_groundtruth_database(dataset_class_name, db_info['group_id'] = group_dict[local_group_id] if 'score' in annos: db_info['score'] = annos['score'][i] - if with_mask: - db_info.update({'box2d_camera': gt_boxes[i]}) if names[i] in all_db_infos: all_db_infos[names[i]].append(db_info) else: @@ -337,288 +265,3 @@ def create_groundtruth_database(dataset_class_name, with open(db_info_save_path, 'wb') as f: pickle.dump(all_db_infos, f) - - -class GTDatabaseCreater: - """Given the raw data, generate the ground truth database. This is the - parallel version. For serialized version, please refer to - `create_groundtruth_database` - - Args: - dataset_class_name (str): Name of the input dataset. - data_path (str): Path of the data. - info_prefix (str): Prefix of the info file. - info_path (str, optional): Path of the info file. - Default: None. 
- mask_anno_path (str, optional): Path of the mask_anno. - Default: None. - used_classes (list[str], optional): Classes have been used. - Default: None. - database_save_path (str, optional): Path to save database. - Default: None. - db_info_save_path (str, optional): Path to save db_info. - Default: None. - relative_path (bool, optional): Whether to use relative path. - Default: True. - with_mask (bool, optional): Whether to use mask. - Default: False. - num_worker (int, optional): the number of parallel workers to use. - Default: 8. - """ - - def __init__(self, - dataset_class_name, - data_path, - info_prefix, - info_path=None, - mask_anno_path=None, - used_classes=None, - database_save_path=None, - db_info_save_path=None, - relative_path=True, - add_rgb=False, - lidar_only=False, - bev_only=False, - coors_range=None, - with_mask=False, - num_worker=8) -> None: - self.dataset_class_name = dataset_class_name - self.data_path = data_path - self.info_prefix = info_prefix - self.info_path = info_path - self.mask_anno_path = mask_anno_path - self.used_classes = used_classes - self.database_save_path = database_save_path - self.db_info_save_path = db_info_save_path - self.relative_path = relative_path - self.add_rgb = add_rgb - self.lidar_only = lidar_only - self.bev_only = bev_only - self.coors_range = coors_range - self.with_mask = with_mask - self.num_worker = num_worker - self.pipeline = None - - def create_single(self, input_dict): - group_counter = 0 - single_db_infos = dict() - example = self.pipeline(input_dict) - annos = example['ann_info'] - image_idx = example['sample_idx'] - points = example['points'].tensor.numpy() - gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() - names = annos['gt_names'] - group_dict = dict() - if 'group_ids' in annos: - group_ids = annos['group_ids'] - else: - group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) - difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) - if 'difficulty' in annos: - difficulty = annos['difficulty'] - - num_obj = gt_boxes_3d.shape[0] - point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) - - if self.with_mask: - # prepare masks - gt_boxes = annos['gt_bboxes'] - img_path = osp.split(example['img_info']['filename'])[-1] - if img_path not in self.file2id.keys(): - print(f'skip image {img_path} for empty mask') - return single_db_infos - img_id = self.file2id[img_path] - kins_annIds = self.coco.getAnnIds(imgIds=img_id) - kins_raw_info = self.coco.loadAnns(kins_annIds) - kins_ann_info = _parse_coco_ann_info(kins_raw_info) - h, w = annos['img_shape'][:2] - gt_masks = [ - _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] - ] - # get mask inds based on iou mapping - bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) - mask_inds = bbox_iou.argmax(axis=0) - valid_inds = (bbox_iou.max(axis=0) > 0.5) - - # mask the image - # use more precise crop when it is ready - # object_img_patches = np.ascontiguousarray( - # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) - # crop image patches using roi_align - # object_img_patches = crop_image_patch_v2( - # torch.Tensor(gt_boxes), - # torch.Tensor(mask_inds).long(), object_img_patches) - object_img_patches, object_masks = crop_image_patch( - gt_boxes, gt_masks, mask_inds, annos['img']) - - for i in range(num_obj): - filename = f'{image_idx}_{names[i]}_{i}.bin' - abs_filepath = osp.join(self.database_save_path, filename) - rel_filepath = osp.join(f'{self.info_prefix}_gt_database', - filename) - - # save point clouds and image patches for each 
object - gt_points = points[point_indices[:, i]] - gt_points[:, :3] -= gt_boxes_3d[i, :3] - - if self.with_mask: - if object_masks[i].sum() == 0 or not valid_inds[i]: - # Skip object for empty or invalid mask - continue - img_patch_path = abs_filepath + '.png' - mask_patch_path = abs_filepath + '.mask.png' - mmcv.imwrite(object_img_patches[i], img_patch_path) - mmcv.imwrite(object_masks[i], mask_patch_path) - - with open(abs_filepath, 'w') as f: - gt_points.tofile(f) - - if (self.used_classes is None) or names[i] in self.used_classes: - db_info = { - 'name': names[i], - 'path': rel_filepath, - 'image_idx': image_idx, - 'gt_idx': i, - 'box3d_lidar': gt_boxes_3d[i], - 'num_points_in_gt': gt_points.shape[0], - 'difficulty': difficulty[i], - } - local_group_id = group_ids[i] - # if local_group_id >= 0: - if local_group_id not in group_dict: - group_dict[local_group_id] = group_counter - group_counter += 1 - db_info['group_id'] = group_dict[local_group_id] - if 'score' in annos: - db_info['score'] = annos['score'][i] - if self.with_mask: - db_info.update({'box2d_camera': gt_boxes[i]}) - if names[i] in single_db_infos: - single_db_infos[names[i]].append(db_info) - else: - single_db_infos[names[i]] = [db_info] - - return single_db_infos - - def create(self): - print(f'Create GT Database of {self.dataset_class_name}') - dataset_cfg = dict( - type=self.dataset_class_name, - data_root=self.data_path, - ann_file=self.info_path) - if self.dataset_class_name == 'KittiDataset': - file_client_args = dict(backend='disk') - dataset_cfg.update( - test_mode=False, - split='training', - modality=dict( - use_lidar=True, - use_depth=False, - use_lidar_intensity=True, - use_camera=self.with_mask, - ), - pipeline=[ - dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=4, - use_dim=4, - file_client_args=file_client_args), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True, - file_client_args=file_client_args) - ]) - - elif self.dataset_class_name == 'NuScenesDataset': - dataset_cfg.update( - use_valid_flag=True, - pipeline=[ - dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=5, - use_dim=5), - dict( - type='LoadPointsFromMultiSweeps', - sweeps_num=10, - use_dim=[0, 1, 2, 3, 4], - pad_empty_sweeps=True, - remove_close=True), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True) - ]) - - elif self.dataset_class_name == 'WaymoDataset': - file_client_args = dict(backend='disk') - dataset_cfg.update( - test_mode=False, - split='training', - modality=dict( - use_lidar=True, - use_depth=False, - use_lidar_intensity=True, - use_camera=False, - ), - pipeline=[ - dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=6, - use_dim=6, - file_client_args=file_client_args), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True, - file_client_args=file_client_args) - ]) - - dataset = build_dataset(dataset_cfg) - self.pipeline = dataset.pipeline - if self.database_save_path is None: - self.database_save_path = osp.join( - self.data_path, f'{self.info_prefix}_gt_database') - if self.db_info_save_path is None: - self.db_info_save_path = osp.join( - self.data_path, f'{self.info_prefix}_dbinfos_train.pkl') - mmcv.mkdir_or_exist(self.database_save_path) - if self.with_mask: - self.coco = COCO(osp.join(self.data_path, self.mask_anno_path)) - imgIds = self.coco.getImgIds() - self.file2id = dict() - for i in imgIds: - info = self.coco.loadImgs([i])[0] - self.file2id.update({info['file_name']: i}) - - def 
loop_dataset(i): - input_dict = dataset.get_data_info(i) - dataset.pre_pipeline(input_dict) - return input_dict - - multi_db_infos = mmcv.track_parallel_progress( - self.create_single, ((loop_dataset(i) - for i in range(len(dataset))), len(dataset)), - self.num_worker) - print('Make global unique group id') - group_counter_offset = 0 - all_db_infos = dict() - for single_db_infos in track_iter_progress(multi_db_infos): - group_id = -1 - for name, name_db_infos in single_db_infos.items(): - for db_info in name_db_infos: - group_id = max(group_id, db_info['group_id']) - db_info['group_id'] += group_counter_offset - if name not in all_db_infos: - all_db_infos[name] = [] - all_db_infos[name].extend(name_db_infos) - group_counter_offset += (group_id + 1) - - for k, v in all_db_infos.items(): - print(f'load {len(v)} {k} database infos') - - with open(self.db_info_save_path, 'wb') as f: - pickle.dump(all_db_infos, f) diff --git a/tools/test.py b/tools/test.py index ad61e21b..c6692478 100644 --- a/tools/test.py +++ b/tools/test.py @@ -211,6 +211,8 @@ def main(): if not args.no_aavt: if '4D' in cfg.model.type: cfg.model.align_after_view_transfromation=True + if 'num_proposals_test' in cfg and cfg.model.type=='DAL': + cfg.model.pts_bbox_head.num_proposals=cfg.num_proposals_test cfg.model.train_cfg = None model = build_model(cfg.model, test_cfg=cfg.get('test_cfg')) fp16_cfg = cfg.get('fp16', None) diff --git a/tools/train.py b/tools/train.py index 749322dc..72a1579a 100644 --- a/tools/train.py +++ b/tools/train.py @@ -247,6 +247,20 @@ def main(): CLASSES=datasets[0].CLASSES, PALETTE=datasets[0].PALETTE # for segmentors if hasattr(datasets[0], 'PALETTE') else None) + + # remove objectsample augmentation in the 2nd stage + if cfg.get('two_stage', False): + assert len(cfg.workflow) == 1 + assert cfg.data.train.dataset.pipeline[5].type =='ObjectSample' + assert len(cfg.data.train.dataset.pipeline)==15 + cfg.data.train.dataset.pipeline = \ + [cfg.data.train.dataset.pipeline[kid] for kid in + [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13]] + cfg.data.train.dataset.pipeline.append( + dict(type='Collect3D', + keys=['points', 'gt_bboxes_3d', 'gt_depth', + 'gt_labels_3d', 'img_inputs'])) + datasets.append(build_dataset(cfg.data.train)) # add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES train_model(